...
This commit is contained in:
127
_pkg2_dont_use/heroagent/api/admin.go
Normal file
127
_pkg2_dont_use/heroagent/api/admin.go
Normal file
@@ -0,0 +1,127 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/system/stats"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
// UptimeProvider defines an interface for getting system uptime.
type UptimeProvider interface {
	// GetUptime returns the system uptime as a pre-formatted display string.
	GetUptime() string
}
|
||||
|
||||
// AdminHandler handles admin-related API routes.
type AdminHandler struct {
	uptimeProvider UptimeProvider      // source of the system uptime string
	statsManager   *stats.StatsManager // may be nil; handlers fall back to package-level stats calls
}
|
||||
|
||||
// NewAdminHandler creates a new AdminHandler
|
||||
func NewAdminHandler(uptimeProvider UptimeProvider, statsManager *stats.StatsManager) *AdminHandler {
|
||||
// If statsManager is nil, create a new one with default settings
|
||||
if statsManager == nil {
|
||||
var err error
|
||||
statsManager, err = stats.NewStatsManagerWithDefaults()
|
||||
if err != nil {
|
||||
// Log the error but continue with nil statsManager
|
||||
fmt.Printf("Error creating StatsManager: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
return &AdminHandler{
|
||||
uptimeProvider: uptimeProvider,
|
||||
statsManager: statsManager,
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterRoutes registers all admin API routes
|
||||
func (h *AdminHandler) RegisterRoutes(app *fiber.App) {
|
||||
// API endpoints
|
||||
admin := app.Group("/api")
|
||||
|
||||
// @Summary Get hardware stats
|
||||
// @Description Get hardware statistics in JSON format
|
||||
// @Tags admin
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Success 200 {object} map[string]interface{}
|
||||
// @Failure 500 {object} ErrorResponse
|
||||
// @Router /api/hardware-stats [get]
|
||||
admin.Get("/hardware-stats", h.getHardwareStatsJSON)
|
||||
|
||||
// @Summary Get process stats
|
||||
// @Description Get process statistics in JSON format
|
||||
// @Tags admin
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Success 200 {object} map[string]interface{}
|
||||
// @Failure 500 {object} ErrorResponse
|
||||
// @Router /api/process-stats [get]
|
||||
admin.Get("/process-stats", h.getProcessStatsJSON)
|
||||
}
|
||||
|
||||
// getProcessStatsJSON returns process statistics in JSON format for API consumption
|
||||
func (h *AdminHandler) getProcessStatsJSON(c *fiber.Ctx) error {
|
||||
// Get process stats from the StatsManager (limit to top 30 processes)
|
||||
var processData *stats.ProcessStats
|
||||
var err error
|
||||
if h.statsManager != nil {
|
||||
processData, err = h.statsManager.GetProcessStats(30)
|
||||
} else {
|
||||
// Fallback to direct function call if StatsManager is not available
|
||||
processData, err = stats.GetProcessStats(30)
|
||||
}
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Failed to get process stats: " + err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
// Convert to []fiber.Map for JSON response
|
||||
processStats := make([]fiber.Map, len(processData.Processes))
|
||||
for i, proc := range processData.Processes {
|
||||
processStats[i] = fiber.Map{
|
||||
"pid": proc.PID,
|
||||
"name": proc.Name,
|
||||
"status": proc.Status,
|
||||
"cpu_percent": proc.CPUPercent,
|
||||
"memory_mb": proc.MemoryMB,
|
||||
"create_time_str": proc.CreateTime,
|
||||
"is_current": proc.IsCurrent,
|
||||
}
|
||||
}
|
||||
|
||||
// Return JSON response
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"processes": processStats,
|
||||
"timestamp": time.Now().Unix(),
|
||||
})
|
||||
}
|
||||
|
||||
// getHardwareStatsJSON returns hardware stats in JSON format for API consumption
|
||||
func (h *AdminHandler) getHardwareStatsJSON(c *fiber.Ctx) error {
|
||||
// Get hardware stats from the StatsManager
|
||||
var hardwareStats map[string]interface{}
|
||||
if h.statsManager != nil {
|
||||
hardwareStats = h.statsManager.GetHardwareStatsJSON()
|
||||
} else {
|
||||
// Fallback to direct function call if StatsManager is not available
|
||||
hardwareStats = stats.GetHardwareStatsJSON()
|
||||
}
|
||||
|
||||
// Convert to fiber.Map for JSON response
|
||||
response := fiber.Map{
|
||||
"success": true,
|
||||
}
|
||||
for k, v := range hardwareStats {
|
||||
response[k] = v
|
||||
}
|
||||
|
||||
// Return JSON response
|
||||
return c.JSON(response)
|
||||
}
|
149
_pkg2_dont_use/heroagent/api/executor.go
Normal file
149
_pkg2_dont_use/heroagent/api/executor.go
Normal file
@@ -0,0 +1,149 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/sal/executor"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
// ExecutorHandler handles executor-related API endpoints.
type ExecutorHandler struct {
	executor *executor.Executor // backend used to run commands and track jobs
}
|
||||
|
||||
// NewExecutorHandler creates a new executor handler
|
||||
func NewExecutorHandler(exec *executor.Executor) *ExecutorHandler {
|
||||
return &ExecutorHandler{
|
||||
executor: exec,
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterRoutes registers executor routes to the fiber app
|
||||
func (h *ExecutorHandler) RegisterRoutes(app *fiber.App) {
|
||||
group := app.Group("/api/executor")
|
||||
|
||||
// @Summary Execute a command
|
||||
// @Description Execute a command and return a job ID
|
||||
// @Tags executor
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param command body ExecuteCommandRequest true "Command to execute"
|
||||
// @Success 200 {object} ExecuteCommandResponse
|
||||
// @Failure 400 {object} ErrorResponse
|
||||
// @Router /api/executor/execute [post]
|
||||
group.Post("/execute", h.executeCommand)
|
||||
|
||||
// @Summary List all jobs
|
||||
// @Description Get a list of all command execution jobs
|
||||
// @Tags executor
|
||||
// @Produce json
|
||||
// @Success 200 {array} JobResponse
|
||||
// @Router /api/executor/jobs [get]
|
||||
group.Get("/jobs", h.listJobs)
|
||||
|
||||
// @Summary Get job details
|
||||
// @Description Get details of a specific job by ID
|
||||
// @Tags executor
|
||||
// @Produce json
|
||||
// @Param id path string true "Job ID"
|
||||
// @Success 200 {object} JobResponse
|
||||
// @Failure 404 {object} ErrorResponse
|
||||
// @Router /api/executor/jobs/{id} [get]
|
||||
group.Get("/jobs/:id", h.getJob)
|
||||
}
|
||||
|
||||
// @Summary Execute a command
|
||||
// @Description Execute a command and return a job ID
|
||||
// @Tags executor
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param command body ExecuteCommandRequest true "Command to execute"
|
||||
// @Success 200 {object} ExecuteCommandResponse
|
||||
// @Failure 400 {object} ErrorResponse
|
||||
// @Router /api/executor/execute [post]
|
||||
func (h *ExecutorHandler) executeCommand(c *fiber.Ctx) error {
|
||||
var req ExecuteCommandRequest
|
||||
if err := c.BodyParser(&req); err != nil {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(ErrorResponse{
|
||||
Error: "Invalid request: " + err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
jobID, err := h.executor.ExecuteCommand(req.Command, req.Args)
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(ErrorResponse{
|
||||
Error: "Failed to execute command: " + err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(ExecuteCommandResponse{
|
||||
JobID: jobID,
|
||||
})
|
||||
}
|
||||
|
||||
// @Summary List all jobs
|
||||
// @Description Get a list of all command execution jobs
|
||||
// @Tags executor
|
||||
// @Produce json
|
||||
// @Success 200 {array} JobResponse
|
||||
// @Router /api/executor/jobs [get]
|
||||
func (h *ExecutorHandler) listJobs(c *fiber.Ctx) error {
|
||||
jobs := h.executor.ListJobs()
|
||||
|
||||
response := make([]JobResponse, 0, len(jobs))
|
||||
for _, job := range jobs {
|
||||
var endTime time.Time
|
||||
if job.Status == "completed" || job.Status == "failed" {
|
||||
endTime = job.EndTime
|
||||
}
|
||||
response = append(response, JobResponse{
|
||||
ID: job.ID,
|
||||
Command: job.Command,
|
||||
Args: job.Args,
|
||||
StartTime: job.StartTime,
|
||||
EndTime: endTime,
|
||||
Status: job.Status,
|
||||
Output: job.Output,
|
||||
Error: job.Error,
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(response)
|
||||
}
|
||||
|
||||
// @Summary Get job details
|
||||
// @Description Get details of a specific job by ID
|
||||
// @Tags executor
|
||||
// @Produce json
|
||||
// @Param id path string true "Job ID"
|
||||
// @Success 200 {object} JobResponse
|
||||
// @Failure 404 {object} ErrorResponse
|
||||
// @Router /api/executor/jobs/{id} [get]
|
||||
func (h *ExecutorHandler) getJob(c *fiber.Ctx) error {
|
||||
jobID := c.Params("id")
|
||||
|
||||
job, err := h.executor.GetJob(jobID)
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusNotFound).JSON(ErrorResponse{
|
||||
Error: err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
var endTime time.Time
|
||||
if job.Status == "completed" || job.Status == "failed" {
|
||||
endTime = job.EndTime
|
||||
}
|
||||
|
||||
return c.JSON(JobResponse{
|
||||
ID: job.ID,
|
||||
Command: job.Command,
|
||||
Args: job.Args,
|
||||
StartTime: job.StartTime,
|
||||
EndTime: endTime,
|
||||
Status: job.Status,
|
||||
Output: job.Output,
|
||||
Error: job.Error,
|
||||
})
|
||||
}
|
112
_pkg2_dont_use/heroagent/api/jet.go
Normal file
112
_pkg2_dont_use/heroagent/api/jet.go
Normal file
@@ -0,0 +1,112 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/CloudyKit/jet/v6"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
// JetTemplateRequest represents the request body for the template
// validation endpoint.
type JetTemplateRequest struct {
	Template string `json:"template"` // raw Jet template source to validate
}
|
||||
|
||||
// JetTemplateResponse represents the response for the template
// validation endpoint. Exactly one of Message or Error is expected
// to be set, depending on Valid.
type JetTemplateResponse struct {
	Valid   bool   `json:"valid"`
	Message string `json:"message,omitempty"`
	Error   string `json:"error,omitempty"`
}
|
||||
|
||||
// JetHandler handles Jet template-related API endpoints.
type JetHandler struct {
	// No dependencies needed for this handler.
}
|
||||
|
||||
// NewJetHandler creates a new Jet template handler
|
||||
func NewJetHandler() *JetHandler {
|
||||
return &JetHandler{}
|
||||
}
|
||||
|
||||
// RegisterRoutes registers Jet template routes to the fiber app
|
||||
func (h *JetHandler) RegisterRoutes(app *fiber.App) {
|
||||
// Create a group for Jet API endpoints
|
||||
jetGroup := app.Group("/api/jet")
|
||||
|
||||
// Register the checkjet endpoint
|
||||
jetGroup.Post("/validate", h.validateTemplate)
|
||||
}
|
||||
|
||||
// @Summary Validate a Jet template
|
||||
// @Description Validates a Jet template and returns detailed error information if invalid
|
||||
// @Tags jet
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param template body JetTemplateRequest true "Jet template to validate"
|
||||
// @Success 200 {object} JetTemplateResponse
|
||||
// @Failure 400 {object} map[string]interface{}
|
||||
// @Router /api/jet/validate [post]
|
||||
func (h *JetHandler) validateTemplate(c *fiber.Ctx) error {
|
||||
var req JetTemplateRequest
|
||||
if err := c.BodyParser(&req); err != nil {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Invalid request: " + err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
if req.Template == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Template cannot be empty",
|
||||
})
|
||||
}
|
||||
|
||||
// Create a temporary in-memory loader for the template
|
||||
loader := jet.NewInMemLoader()
|
||||
|
||||
// Add the template to the loader
|
||||
loader.Set("test.jet", req.Template)
|
||||
|
||||
// Create a new Jet set with the loader and enable development mode for better error reporting
|
||||
set := jet.NewSet(loader, jet.InDevelopmentMode())
|
||||
|
||||
// Get the template to parse it
|
||||
_, err := set.GetTemplate("test.jet")
|
||||
|
||||
// Check if the template is valid
|
||||
if err != nil {
|
||||
// Extract meaningful error information
|
||||
errMsg := err.Error()
|
||||
|
||||
// Ignore errors related to extended or included files not found
|
||||
// These aren't syntax errors but dependency errors we want to ignore
|
||||
if strings.Contains(errMsg, "no template") ||
|
||||
strings.Contains(errMsg, "unable to locate template") ||
|
||||
strings.Contains(errMsg, "template not found") ||
|
||||
strings.Contains(errMsg, "extends|import") ||
|
||||
strings.Contains(errMsg, "could not be found") ||
|
||||
strings.Contains(errMsg, "template /") {
|
||||
// Still valid since it's only a dependency error, not a syntax error
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"valid": true,
|
||||
"message": "Template syntax is valid (ignoring extends/include errors)",
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": false,
|
||||
"valid": false,
|
||||
"error": errMsg,
|
||||
})
|
||||
}
|
||||
|
||||
// If no error, the template is valid
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"valid": true,
|
||||
"message": "Template is valid",
|
||||
})
|
||||
}
|
74
_pkg2_dont_use/heroagent/api/main.go
Normal file
74
_pkg2_dont_use/heroagent/api/main.go
Normal file
@@ -0,0 +1,74 @@
|
||||
// Package api contains API handlers for HeroLauncher
|
||||
package api
|
||||
|
||||
// @title HeroLauncher API
|
||||
// @version 1.0
|
||||
// @description API for HeroLauncher - a modular service manager
|
||||
// @termsOfService http://swagger.io/terms/
|
||||
// @contact.name API Support
|
||||
// @contact.email support@freeflowuniverse.org
|
||||
// @license.name Apache 2.0
|
||||
// @license.url http://www.apache.org/licenses/LICENSE-2.0.html
|
||||
// @host localhost:9001
|
||||
// @BasePath /api
|
||||
// @schemes http https
|
||||
|
||||
// This file exists solely to provide Swagger documentation
|
||||
// and to ensure all API handlers are included in the documentation
|
||||
|
||||
// AdminHandler handles admin-related API routes
|
||||
// @Router /api/hardware-stats [get]
|
||||
// @Router /api/process-stats [get]
|
||||
|
||||
// ServiceHandler handles service-related API routes
|
||||
// @Router /api/services/running [get]
|
||||
// @Router /api/services/start [post]
|
||||
// @Router /api/services/stop [post]
|
||||
// @Router /api/services/restart [post]
|
||||
// @Router /api/services/delete [post]
|
||||
// @Router /api/services/logs [post]
|
||||
// @Router /admin/services/ [get]
|
||||
// @Router /admin/services/data [get]
|
||||
// @Router /admin/services/running [get]
|
||||
// @Router /admin/services/start [post]
|
||||
// @Router /admin/services/stop [post]
|
||||
// @Router /admin/services/restart [post]
|
||||
// @Router /admin/services/delete [post]
|
||||
// @Router /admin/services/logs [post]
|
||||
|
||||
// ExecutorHandler handles command execution API routes
|
||||
// @Router /api/executor/execute [post]
|
||||
// @Router /api/executor/jobs [get]
|
||||
// @Router /api/executor/jobs/{id} [get]
|
||||
|
||||
// JetHandler handles Jet template API routes
|
||||
// @Router /api/jet/validate [post]
|
||||
|
||||
// RedisHandler handles Redis API routes
|
||||
// @Router /api/redis/set [post]
|
||||
// @Router /api/redis/get/{key} [get]
|
||||
// @Router /api/redis/del/{key} [delete]
|
||||
// @Router /api/redis/keys/{pattern} [get]
|
||||
// @Router /api/redis/hset [post]
|
||||
// @Router /api/redis/hget/{key}/{field} [get]
|
||||
// @Router /api/redis/hdel [post]
|
||||
// @Router /api/redis/hkeys/{key} [get]
|
||||
// @Router /api/redis/hgetall/{key} [get]
|
||||
|
||||
// JobHandler handles HeroJobs API routes
|
||||
// @Router /api/jobs/submit [post]
|
||||
// @Router /api/jobs/get/{id} [get]
|
||||
// @Router /api/jobs/delete/{id} [delete]
|
||||
// @Router /api/jobs/list [get]
|
||||
// @Router /api/jobs/queue/size [get]
|
||||
// @Router /api/jobs/queue/empty [post]
|
||||
// @Router /api/jobs/queue/get [get]
|
||||
// @Router /api/jobs/create [post]
|
||||
// @Router /admin/jobs/submit [post]
|
||||
// @Router /admin/jobs/get/{id} [get]
|
||||
// @Router /admin/jobs/delete/{id} [delete]
|
||||
// @Router /admin/jobs/list [get]
|
||||
// @Router /admin/jobs/queue/size [get]
|
||||
// @Router /admin/jobs/queue/empty [post]
|
||||
// @Router /admin/jobs/queue/get [get]
|
||||
// @Router /admin/jobs/create [post]
|
105
_pkg2_dont_use/heroagent/api/models.go
Normal file
105
_pkg2_dont_use/heroagent/api/models.go
Normal file
@@ -0,0 +1,105 @@
|
||||
package api
|
||||
|
||||
import "time"
|
||||
|
||||
// ErrorResponse represents an error response returned by API handlers.
type ErrorResponse struct {
	Error string `json:"error"` // human-readable error description
}

// Executor Models

// ExecuteCommandRequest represents a request to execute a command.
type ExecuteCommandRequest struct {
	Command string   `json:"command"` // executable or command name to run
	Args    []string `json:"args"`    // arguments passed to the command
}

// ExecuteCommandResponse represents the response from executing a command.
type ExecuteCommandResponse struct {
	JobID string `json:"job_id"` // identifier used to query the job later
}

// JobResponse represents a job response.
type JobResponse struct {
	ID        string    `json:"id"`
	Command   string    `json:"command"`
	Args      []string  `json:"args"`
	StartTime time.Time `json:"start_time"`
	EndTime   time.Time `json:"end_time"` // zero value until the job finishes
	Status    string    `json:"status"`
	Output    string    `json:"output"`
	Error     string    `json:"error"`
}

// Redis Models

// SetKeyRequest represents a request to set a key.
type SetKeyRequest struct {
	Key               string `json:"key"`
	Value             string `json:"value"`
	ExpirationSeconds int    `json:"expiration_seconds"` // 0 presumably means no expiration — TODO confirm in handler
}

// SetKeyResponse represents the response from setting a key.
type SetKeyResponse struct {
	Success bool `json:"success"`
}

// GetKeyResponse represents the response from getting a key.
type GetKeyResponse struct {
	Value string `json:"value"`
}

// DeleteKeyResponse represents the response from deleting a key.
type DeleteKeyResponse struct {
	Count int `json:"count"` // number of keys deleted
}

// GetKeysResponse represents the response from getting keys.
type GetKeysResponse struct {
	Keys []string `json:"keys"`
}

// HSetKeyRequest represents a request to set a hash field.
type HSetKeyRequest struct {
	Key   string `json:"key"`
	Field string `json:"field"`
	Value string `json:"value"`
}

// HSetKeyResponse represents the response from setting a hash field.
type HSetKeyResponse struct {
	Added bool `json:"added"` // true when the field was newly created
}

// HGetKeyResponse represents the response from getting a hash field.
type HGetKeyResponse struct {
	Value string `json:"value"`
}

// HDelKeyRequest represents a request to delete hash fields.
type HDelKeyRequest struct {
	Key    string   `json:"key"`
	Fields []string `json:"fields"`
}

// HDelKeyResponse represents the response from deleting hash fields.
type HDelKeyResponse struct {
	Count int `json:"count"` // number of fields deleted
}

// HKeysResponse represents the response from getting hash keys.
type HKeysResponse struct {
	Fields []string `json:"fields"`
}

// HLenResponse represents the response from getting hash length.
type HLenResponse struct {
	Length int `json:"length"`
}

// IncrKeyResponse represents the response from incrementing a key.
type IncrKeyResponse struct {
	Value int64 `json:"value"` // value after the increment
}
|
544
_pkg2_dont_use/heroagent/api/processmanager.go
Normal file
544
_pkg2_dont_use/heroagent/api/processmanager.go
Normal file
@@ -0,0 +1,544 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/processmanager"
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/processmanager/interfaces"
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/processmanager/interfaces/openrpc"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
// ProcessDisplayInfo represents information about a process for display
// purposes. All fields are pre-formatted strings ready to render.
type ProcessDisplayInfo struct {
	ID        string `json:"id"`         // process PID rendered as a string
	Name      string `json:"name"`       // process name
	Status    string `json:"status"`     // textual process status
	Uptime    string `json:"uptime"`     // human-readable elapsed time since start
	StartTime string `json:"start_time"` // formatted as "2006-01-02 15:04:05"
	CPU       string `json:"cpu"`        // e.g. "1.25%"
	Memory    string `json:"memory"`     // e.g. "12.50 MB"
}
|
||||
|
||||
// ConvertToDisplayInfo converts a ProcessInfo from the processmanager package to ProcessDisplayInfo
|
||||
func ConvertToDisplayInfo(info *processmanager.ProcessInfo) ProcessDisplayInfo {
|
||||
// Calculate uptime from start time
|
||||
uptime := formatUptime(time.Since(info.StartTime))
|
||||
|
||||
return ProcessDisplayInfo{
|
||||
ID: fmt.Sprintf("%d", info.PID),
|
||||
Name: info.Name,
|
||||
Status: string(info.Status),
|
||||
Uptime: uptime,
|
||||
StartTime: info.StartTime.Format("2006-01-02 15:04:05"),
|
||||
CPU: fmt.Sprintf("%.2f%%", info.CPUPercent),
|
||||
Memory: fmt.Sprintf("%.2f MB", info.MemoryMB),
|
||||
}
|
||||
}
|
||||
|
||||
// ServiceHandler handles service-related API routes. It talks to the
// process manager over an OpenRPC client.
type ServiceHandler struct {
	client *openrpc.Client // connection to the process manager
	logger *log.Logger     // destination for handler diagnostics
}
|
||||
|
||||
// DefaultLogLines is the default number of log lines to retrieve.
// It is deliberately high so that, in practice, all logs are shown.
const DefaultLogLines = 10000
|
||||
|
||||
// NewServiceHandler creates a new service handler with the provided socket path and secret
|
||||
func NewServiceHandler(socketPath, secret string, logger *log.Logger) *ServiceHandler {
|
||||
fmt.Printf("DEBUG: Creating new api.ServiceHandler with socket path: %s and secret: %s\n", socketPath, secret)
|
||||
return &ServiceHandler{
|
||||
client: openrpc.NewClient(socketPath, secret),
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterRoutes registers service API routes
|
||||
func (h *ServiceHandler) RegisterRoutes(app *fiber.App) {
|
||||
// Register common routes to both API and admin groups
|
||||
serviceRoutes := func(group fiber.Router) {
|
||||
group.Get("/running", h.getRunningServices)
|
||||
group.Post("/start", h.startService)
|
||||
group.Post("/stop", h.stopService)
|
||||
group.Post("/restart", h.restartService)
|
||||
group.Post("/delete", h.deleteService)
|
||||
group.Post("/logs", h.getProcessLogs)
|
||||
}
|
||||
|
||||
// Apply common routes to API group
|
||||
apiServices := app.Group("/api/services")
|
||||
serviceRoutes(apiServices)
|
||||
|
||||
// Apply common routes to admin group and add admin-specific routes
|
||||
adminServices := app.Group("/admin/services")
|
||||
serviceRoutes(adminServices)
|
||||
|
||||
// Admin-only routes
|
||||
adminServices.Get("/", h.getServicesPage)
|
||||
adminServices.Get("/data", h.getServicesData)
|
||||
}
|
||||
|
||||
// getProcessList gets a list of processes from the process manager
|
||||
// TODO: add swagger annotations
|
||||
func (h *ServiceHandler) getProcessList() ([]ProcessDisplayInfo, error) {
|
||||
// Debug: Log the function entry
|
||||
h.logger.Printf("Entering getProcessList() function")
|
||||
fmt.Printf("DEBUG: API getProcessList called using client: %p\n", h.client)
|
||||
|
||||
// Get the list of processes via the client
|
||||
result, err := h.client.ListProcesses("json")
|
||||
if err != nil {
|
||||
h.logger.Printf("Error listing processes: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert the result to a slice of ProcessStatus
|
||||
processStatuses, ok := result.([]interfaces.ProcessStatus)
|
||||
if !ok {
|
||||
// Try to handle the result as a map or other structure
|
||||
h.logger.Printf("Warning: unexpected result type from ListProcesses, trying alternative parsing")
|
||||
|
||||
// Try to convert the result to JSON and then parse it
|
||||
resultJSON, err := json.Marshal(result)
|
||||
if err != nil {
|
||||
h.logger.Printf("Error marshaling result to JSON: %v", err)
|
||||
return nil, fmt.Errorf("failed to marshal result: %w", err)
|
||||
}
|
||||
|
||||
var processStatuses []interfaces.ProcessStatus
|
||||
if err := json.Unmarshal(resultJSON, &processStatuses); err != nil {
|
||||
h.logger.Printf("Error unmarshaling result to ProcessStatus: %v", err)
|
||||
return nil, fmt.Errorf("failed to unmarshal process list result: %w", err)
|
||||
}
|
||||
|
||||
// Convert to display info format
|
||||
displayInfoList := make([]ProcessDisplayInfo, 0, len(processStatuses))
|
||||
for _, proc := range processStatuses {
|
||||
// Calculate uptime based on start time
|
||||
uptime := formatUptime(time.Since(proc.StartTime))
|
||||
|
||||
displayInfo := ProcessDisplayInfo{
|
||||
ID: fmt.Sprintf("%d", proc.PID),
|
||||
Name: proc.Name,
|
||||
Status: string(proc.Status),
|
||||
Uptime: uptime,
|
||||
StartTime: proc.StartTime.Format("2006-01-02 15:04:05"),
|
||||
CPU: fmt.Sprintf("%.2f%%", proc.CPUPercent),
|
||||
Memory: fmt.Sprintf("%.2f MB", proc.MemoryMB),
|
||||
}
|
||||
displayInfoList = append(displayInfoList, displayInfo)
|
||||
}
|
||||
|
||||
// Debug: Log the number of processes
|
||||
h.logger.Printf("Found %d processes", len(displayInfoList))
|
||||
return displayInfoList, nil
|
||||
}
|
||||
|
||||
// Convert to display info format
|
||||
displayInfoList := make([]ProcessDisplayInfo, 0, len(processStatuses))
|
||||
for _, proc := range processStatuses {
|
||||
// Calculate uptime based on start time
|
||||
uptime := formatUptime(time.Since(proc.StartTime))
|
||||
|
||||
displayInfo := ProcessDisplayInfo{
|
||||
ID: fmt.Sprintf("%d", proc.PID),
|
||||
Name: proc.Name,
|
||||
Status: string(proc.Status),
|
||||
Uptime: uptime,
|
||||
StartTime: proc.StartTime.Format("2006-01-02 15:04:05"),
|
||||
CPU: fmt.Sprintf("%.2f%%", proc.CPUPercent),
|
||||
Memory: fmt.Sprintf("%.2f MB", proc.MemoryMB),
|
||||
}
|
||||
displayInfoList = append(displayInfoList, displayInfo)
|
||||
}
|
||||
|
||||
// Debug: Log the number of processes
|
||||
h.logger.Printf("Found %d processes", len(displayInfoList))
|
||||
|
||||
return displayInfoList, nil
|
||||
}
|
||||
|
||||
// formatUptime formats a duration as a human-readable uptime string
|
||||
func formatUptime(duration time.Duration) string {
|
||||
totalSeconds := int(duration.Seconds())
|
||||
days := totalSeconds / (24 * 3600)
|
||||
hours := (totalSeconds % (24 * 3600)) / 3600
|
||||
minutes := (totalSeconds % 3600) / 60
|
||||
seconds := totalSeconds % 60
|
||||
|
||||
if days > 0 {
|
||||
return fmt.Sprintf("%d days, %d hours", days, hours)
|
||||
} else if hours > 0 {
|
||||
return fmt.Sprintf("%d hours, %d minutes", hours, minutes)
|
||||
} else if minutes > 0 {
|
||||
return fmt.Sprintf("%d minutes, %d seconds", minutes, seconds)
|
||||
} else {
|
||||
return fmt.Sprintf("%d seconds", seconds)
|
||||
}
|
||||
}
|
||||
|
||||
// @Summary Start a service
|
||||
// @Description Start a new service with the given name and command
|
||||
// @Tags services
|
||||
// @Accept x-www-form-urlencoded
|
||||
// @Produce json
|
||||
// @Param name formData string true "Service name"
|
||||
// @Param command formData string true "Command to run"
|
||||
// @Success 200 {object} map[string]interface{}
|
||||
// @Failure 400 {object} map[string]string
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /api/services/start [post]
|
||||
// @Router /admin/services/start [post]
|
||||
func (h *ServiceHandler) startService(c *fiber.Ctx) error {
|
||||
// Get form values
|
||||
name := c.FormValue("name")
|
||||
command := c.FormValue("command")
|
||||
|
||||
// Validate inputs
|
||||
if name == "" || command == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Name and command are required",
|
||||
})
|
||||
}
|
||||
|
||||
// Start the process with default values
|
||||
// logEnabled=true, deadline=0 (no deadline), no cron, no jobID
|
||||
fmt.Printf("DEBUG: API startService called for '%s' using client: %p\n", name, h.client)
|
||||
result, err := h.client.StartProcess(name, command, true, 0, "", "")
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": fmt.Sprintf("Failed to start service: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
// Check if the result indicates success
|
||||
if !result.Success {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": result.Message,
|
||||
})
|
||||
}
|
||||
|
||||
// Get the PID from the result
|
||||
pid := result.PID
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"message": fmt.Sprintf("Service '%s' started with PID %d", name, pid),
|
||||
"pid": pid,
|
||||
})
|
||||
}
|
||||
|
||||
// @Summary Stop a service
|
||||
// @Description Stop a running service by name
|
||||
// @Tags services
|
||||
// @Accept x-www-form-urlencoded
|
||||
// @Produce json
|
||||
// @Param name formData string true "Service name"
|
||||
// @Success 200 {object} map[string]interface{}
|
||||
// @Failure 400 {object} map[string]string
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /api/services/stop [post]
|
||||
// @Router /admin/services/stop [post]
|
||||
// stopService stops a service
|
||||
func (h *ServiceHandler) stopService(c *fiber.Ctx) error {
|
||||
// Get form values
|
||||
name := c.FormValue("name")
|
||||
|
||||
// For backward compatibility, try ID field if name is empty
|
||||
if name == "" {
|
||||
name = c.FormValue("id")
|
||||
if name == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Process name is required",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Log the stop request
|
||||
h.logger.Printf("Stopping process with name: %s", name)
|
||||
|
||||
// Stop the process
|
||||
fmt.Printf("DEBUG: API stopService called for '%s' using client: %p\n", name, h.client)
|
||||
result, err := h.client.StopProcess(name)
|
||||
if err != nil {
|
||||
h.logger.Printf("Error stopping process: %v", err)
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": fmt.Sprintf("Failed to stop service: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
// Check if the result indicates success
|
||||
if !result.Success {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": result.Message,
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"message": fmt.Sprintf("Service '%s' stopped successfully", name),
|
||||
})
|
||||
}
|
||||
|
||||
// @Summary Restart a service
|
||||
// @Description Restart a running service by name
|
||||
// @Tags services
|
||||
// @Accept x-www-form-urlencoded
|
||||
// @Produce json
|
||||
// @Param name formData string true "Service name"
|
||||
// @Success 200 {object} map[string]interface{}
|
||||
// @Failure 400 {object} map[string]string
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /api/services/restart [post]
|
||||
// @Router /admin/services/restart [post]
|
||||
// restartService restarts a service
|
||||
func (h *ServiceHandler) restartService(c *fiber.Ctx) error {
|
||||
// Get form values
|
||||
name := c.FormValue("name")
|
||||
|
||||
// For backward compatibility, try ID field if name is empty
|
||||
if name == "" {
|
||||
name = c.FormValue("id")
|
||||
if name == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Process name is required",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Log the restart request
|
||||
h.logger.Printf("Restarting process with name: %s", name)
|
||||
|
||||
// Restart the process
|
||||
fmt.Printf("DEBUG: API restartService called for '%s' using client: %p\n", name, h.client)
|
||||
result, err := h.client.RestartProcess(name)
|
||||
if err != nil {
|
||||
h.logger.Printf("Error restarting process: %v", err)
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": fmt.Sprintf("Failed to restart service: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
// Check if the result indicates success
|
||||
if !result.Success {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": result.Message,
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"message": fmt.Sprintf("Service '%s' restarted successfully", name),
|
||||
})
|
||||
}
|
||||
|
||||
// @Summary Delete a service
|
||||
// @Description Delete a service by name
|
||||
// @Tags services
|
||||
// @Accept x-www-form-urlencoded
|
||||
// @Produce json
|
||||
// @Param name formData string true "Service name"
|
||||
// @Success 200 {object} map[string]interface{}
|
||||
// @Failure 400 {object} map[string]string
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /api/services/delete [post]
|
||||
// @Router /admin/services/delete [post]
|
||||
// deleteService deletes a service
|
||||
func (h *ServiceHandler) deleteService(c *fiber.Ctx) error {
|
||||
// Get form values
|
||||
name := c.FormValue("name")
|
||||
|
||||
// Validate inputs
|
||||
if name == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Service name is required",
|
||||
})
|
||||
}
|
||||
|
||||
// Debug: Log the delete request
|
||||
h.logger.Printf("Deleting process with name: %s", name)
|
||||
|
||||
// Delete the process
|
||||
fmt.Printf("DEBUG: API deleteService called for '%s' using client: %p\n", name, h.client)
|
||||
result, err := h.client.DeleteProcess(name)
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": fmt.Sprintf("Failed to delete service: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
// Check if the result indicates success
|
||||
if !result.Success {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": result.Message,
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"message": fmt.Sprintf("Service '%s' deleted successfully", name),
|
||||
})
|
||||
}
|
||||
|
||||
// @Summary Get running services
|
||||
// @Description Get a list of all currently running services
|
||||
// @Tags services
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Success 200 {object} map[string][]ProcessDisplayInfo
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /api/services/running [get]
|
||||
// @Router /admin/services/running [get]
|
||||
func (h *ServiceHandler) getRunningServices(c *fiber.Ctx) error {
|
||||
// Get the list of processes
|
||||
processes, err := h.getProcessList()
|
||||
if err != nil {
|
||||
h.logger.Printf("Error getting process list: %v", err)
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": fmt.Sprintf("Failed to get process list: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
// Filter to only include running processes
|
||||
runningProcesses := make([]ProcessDisplayInfo, 0)
|
||||
for _, proc := range processes {
|
||||
if proc.Status == "running" {
|
||||
runningProcesses = append(runningProcesses, proc)
|
||||
}
|
||||
}
|
||||
|
||||
// Return the processes as JSON
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"services": runningProcesses,
|
||||
"processes": processes, // Keep for backward compatibility
|
||||
})
|
||||
}
|
||||
|
||||
// @Summary Get process logs
|
||||
// @Description Get logs for a specific process
|
||||
// @Tags services
|
||||
// @Accept x-www-form-urlencoded
|
||||
// @Produce json
|
||||
// @Param name formData string true "Service name"
|
||||
// @Param lines formData integer false "Number of log lines to retrieve"
|
||||
// @Success 200 {object} map[string]string
|
||||
// @Failure 400 {object} map[string]string
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /api/services/logs [post]
|
||||
// @Router /admin/services/logs [post]
|
||||
// getProcessLogs retrieves logs for a specific process
|
||||
func (h *ServiceHandler) getProcessLogs(c *fiber.Ctx) error {
|
||||
// Get form values
|
||||
name := c.FormValue("name")
|
||||
|
||||
// For backward compatibility, try ID field if name is empty
|
||||
if name == "" {
|
||||
name = c.FormValue("id")
|
||||
if name == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Process name is required",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Get the number of lines to retrieve
|
||||
linesStr := c.FormValue("lines")
|
||||
lines := DefaultLogLines
|
||||
if linesStr != "" {
|
||||
if parsedLines, err := strconv.Atoi(linesStr); err == nil && parsedLines > 0 {
|
||||
lines = parsedLines
|
||||
}
|
||||
}
|
||||
|
||||
// Log the request
|
||||
h.logger.Printf("Getting logs for process: %s (lines: %d)", name, lines)
|
||||
|
||||
// Get logs
|
||||
fmt.Printf("DEBUG: API getProcessLogs called for '%s' using client: %p\n", name, h.client)
|
||||
logs, err := h.client.GetProcessLogs(name, lines)
|
||||
if err != nil {
|
||||
h.logger.Printf("Error getting process logs: %v", err)
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": fmt.Sprintf("Failed to get logs: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"logs": logs,
|
||||
})
|
||||
}
|
||||
|
||||
// @Summary Get services page
|
||||
// @Description Get the services management page
|
||||
// @Tags admin
|
||||
// @Produce html
|
||||
// @Success 200 {string} string "HTML content"
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /admin/services/ [get]
|
||||
// getServicesPage renders the services page
|
||||
func (h *ServiceHandler) getServicesPage(c *fiber.Ctx) error {
|
||||
// Get processes to display on the initial page load
|
||||
processes, _ := h.getProcessList()
|
||||
|
||||
// Check if client is properly initialized
|
||||
var warning string
|
||||
if h.client == nil {
|
||||
warning = "Process manager client is not properly initialized."
|
||||
h.logger.Printf("Warning: %s", warning)
|
||||
}
|
||||
|
||||
return c.Render("admin/services", fiber.Map{
|
||||
"title": "Services",
|
||||
"processes": processes,
|
||||
"warning": warning,
|
||||
})
|
||||
}
|
||||
|
||||
// @Summary Get services data
|
||||
// @Description Get services data for AJAX updates
|
||||
// @Tags admin
|
||||
// @Produce html
|
||||
// @Success 200 {string} string "HTML content"
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /admin/services/data [get]
|
||||
// getServicesData returns only the services fragment for AJAX updates
|
||||
func (h *ServiceHandler) getServicesData(c *fiber.Ctx) error {
|
||||
// Get processes
|
||||
processes, _ := h.getProcessList()
|
||||
|
||||
// Check if client is properly initialized
|
||||
var warning string
|
||||
if h.client == nil {
|
||||
warning = "Process manager client is not properly initialized."
|
||||
h.logger.Printf("Warning: %s", warning)
|
||||
}
|
||||
|
||||
// Return the fragment with process data and optional warning
|
||||
return c.Render("admin/services_fragment", fiber.Map{
|
||||
"processes": processes,
|
||||
"warning": warning,
|
||||
"layout": "",
|
||||
})
|
||||
}
|
449
_pkg2_dont_use/heroagent/api/redisserver.go
Normal file
449
_pkg2_dont_use/heroagent/api/redisserver.go
Normal file
@@ -0,0 +1,449 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// RedisHandler handles Redis-related API endpoints.
// It owns a single go-redis client shared by every route it registers.
type RedisHandler struct {
	// redisClient is the connection used for all key/hash operations.
	redisClient *redis.Client
}
|
||||
|
||||
// NewRedisHandler creates a new Redis handler
|
||||
func NewRedisHandler(redisAddr string, isUnixSocket bool) *RedisHandler {
|
||||
// Determine network type
|
||||
networkType := "tcp"
|
||||
if isUnixSocket {
|
||||
networkType = "unix"
|
||||
}
|
||||
|
||||
// Create Redis client
|
||||
client := redis.NewClient(&redis.Options{
|
||||
Network: networkType,
|
||||
Addr: redisAddr,
|
||||
DB: 0,
|
||||
DialTimeout: 5 * time.Second,
|
||||
ReadTimeout: 5 * time.Second,
|
||||
WriteTimeout: 5 * time.Second,
|
||||
})
|
||||
|
||||
return &RedisHandler{
|
||||
redisClient: client,
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterRoutes registers Redis routes to the fiber app.
// All routes are mounted under the /api/redis group; the swagger
// annotations below are consumed by the swag doc generator.
func (h *RedisHandler) RegisterRoutes(app *fiber.App) {
	group := app.Group("/api/redis")

	// @Summary Set a Redis key
	// @Description Set a key-value pair in Redis with optional expiration
	// @Tags redis
	// @Accept json
	// @Produce json
	// @Param request body SetKeyRequest true "Key-value data"
	// @Success 200 {object} SetKeyResponse
	// @Failure 400 {object} ErrorResponse
	// @Failure 500 {object} ErrorResponse
	// @Router /api/redis/set [post]
	group.Post("/set", h.setKey)

	// @Summary Get a Redis key
	// @Description Get a value by key from Redis
	// @Tags redis
	// @Produce json
	// @Param key path string true "Key to retrieve"
	// @Success 200 {object} GetKeyResponse
	// @Failure 400 {object} ErrorResponse
	// @Failure 404 {object} ErrorResponse
	// @Failure 500 {object} ErrorResponse
	// @Router /api/redis/get/{key} [get]
	group.Get("/get/:key", h.getKey)

	// @Summary Delete a Redis key
	// @Description Delete a key from Redis
	// @Tags redis
	// @Produce json
	// @Param key path string true "Key to delete"
	// @Success 200 {object} DeleteKeyResponse
	// @Failure 400 {object} ErrorResponse
	// @Failure 500 {object} ErrorResponse
	// @Router /api/redis/del/{key} [delete]
	group.Delete("/del/:key", h.deleteKey)

	// @Summary Get Redis keys by pattern
	// @Description Get keys matching a pattern from Redis
	// @Tags redis
	// @Produce json
	// @Param pattern path string true "Pattern to match keys"
	// @Success 200 {object} GetKeysResponse
	// @Failure 500 {object} ErrorResponse
	// @Router /api/redis/keys/{pattern} [get]
	group.Get("/keys/:pattern", h.getKeys)

	// @Summary Set hash fields
	// @Description Set one or more fields in a Redis hash
	// @Tags redis
	// @Accept json
	// @Produce json
	// @Param request body HSetKeyRequest true "Hash field data"
	// @Success 200 {object} HSetKeyResponse
	// @Failure 400 {object} ErrorResponse
	// @Failure 500 {object} ErrorResponse
	// @Router /api/redis/hset [post]
	group.Post("/hset", h.hsetKey)

	// @Summary Get hash field
	// @Description Get a field from a Redis hash
	// @Tags redis
	// @Produce json
	// @Param key path string true "Hash key"
	// @Param field path string true "Field to retrieve"
	// @Success 200 {object} HGetKeyResponse
	// @Failure 400 {object} ErrorResponse
	// @Failure 404 {object} ErrorResponse
	// @Failure 500 {object} ErrorResponse
	// @Router /api/redis/hget/{key}/{field} [get]
	group.Get("/hget/:key/:field", h.hgetKey)

	// @Summary Delete hash fields
	// @Description Delete one or more fields from a Redis hash
	// @Tags redis
	// @Accept json
	// @Produce json
	// @Param request body HDelKeyRequest true "Fields to delete"
	// @Success 200 {object} HDelKeyResponse
	// @Failure 400 {object} ErrorResponse
	// @Failure 500 {object} ErrorResponse
	// @Router /api/redis/hdel [post]
	group.Post("/hdel", h.hdelKey)

	// @Summary Get hash fields
	// @Description Get all field names in a Redis hash
	// @Tags redis
	// @Produce json
	// @Param key path string true "Hash key"
	// @Success 200 {object} HKeysResponse
	// @Failure 400 {object} ErrorResponse
	// @Failure 500 {object} ErrorResponse
	// @Router /api/redis/hkeys/{key} [get]
	group.Get("/hkeys/:key", h.hkeysKey)

	// @Summary Get all hash fields and values
	// @Description Get all fields and values in a Redis hash
	// @Tags redis
	// @Produce json
	// @Param key path string true "Hash key"
	// @Success 200 {object} map[string]string
	// @Failure 400 {object} ErrorResponse
	// @Failure 500 {object} ErrorResponse
	// @Router /api/redis/hgetall/{key} [get]
	group.Get("/hgetall/:key", h.hgetallKey)
}
|
||||
|
||||
// setKey sets a key-value pair in Redis
|
||||
func (h *RedisHandler) setKey(c *fiber.Ctx) error {
|
||||
// Parse request
|
||||
var req struct {
|
||||
Key string `json:"key"`
|
||||
Value string `json:"value"`
|
||||
Expires int `json:"expires,omitempty"` // Expiration in seconds, optional
|
||||
}
|
||||
|
||||
if err := c.BodyParser(&req); err != nil {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Invalid request format: " + err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
// Validate required fields
|
||||
if req.Key == "" || req.Value == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Key and value are required",
|
||||
})
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
var err error
|
||||
|
||||
// Set with or without expiration
|
||||
if req.Expires > 0 {
|
||||
err = h.redisClient.Set(ctx, req.Key, req.Value, time.Duration(req.Expires)*time.Second).Err()
|
||||
} else {
|
||||
err = h.redisClient.Set(ctx, req.Key, req.Value, 0).Err()
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Failed to set key: " + err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"message": "Key set successfully",
|
||||
})
|
||||
}
|
||||
|
||||
// getKey retrieves a value by key from Redis
|
||||
func (h *RedisHandler) getKey(c *fiber.Ctx) error {
|
||||
key := c.Params("key")
|
||||
if key == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Key is required",
|
||||
})
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
val, err := h.redisClient.Get(ctx, key).Result()
|
||||
|
||||
if err == redis.Nil {
|
||||
return c.Status(fiber.StatusNotFound).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Key not found",
|
||||
})
|
||||
} else if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Failed to get key: " + err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"key": key,
|
||||
"value": val,
|
||||
})
|
||||
}
|
||||
|
||||
// deleteKey deletes a key from Redis
|
||||
func (h *RedisHandler) deleteKey(c *fiber.Ctx) error {
|
||||
key := c.Params("key")
|
||||
if key == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Key is required",
|
||||
})
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
result, err := h.redisClient.Del(ctx, key).Result()
|
||||
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Failed to delete key: " + err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"deleted": result > 0,
|
||||
"count": result,
|
||||
})
|
||||
}
|
||||
|
||||
// getKeys retrieves keys matching a pattern from Redis
|
||||
func (h *RedisHandler) getKeys(c *fiber.Ctx) error {
|
||||
pattern := c.Params("pattern", "*")
|
||||
|
||||
ctx := context.Background()
|
||||
keys, err := h.redisClient.Keys(ctx, pattern).Result()
|
||||
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Failed to get keys: " + err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"keys": keys,
|
||||
"count": len(keys),
|
||||
})
|
||||
}
|
||||
|
||||
// hsetKey sets a field in a hash stored at key
|
||||
func (h *RedisHandler) hsetKey(c *fiber.Ctx) error {
|
||||
// Parse request
|
||||
var req struct {
|
||||
Key string `json:"key"`
|
||||
Fields map[string]string `json:"fields"`
|
||||
}
|
||||
|
||||
if err := c.BodyParser(&req); err != nil {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Invalid request format: " + err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
// Validate required fields
|
||||
if req.Key == "" || len(req.Fields) == 0 {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Key and at least one field are required",
|
||||
})
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
totalAdded := 0
|
||||
|
||||
// Use HSet to set multiple fields at once
|
||||
for field, value := range req.Fields {
|
||||
added, err := h.redisClient.HSet(ctx, req.Key, field, value).Result()
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Failed to set hash field: " + err.Error(),
|
||||
})
|
||||
}
|
||||
totalAdded += int(added)
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"added": totalAdded,
|
||||
})
|
||||
}
|
||||
|
||||
// hgetKey retrieves a field from a hash stored at key
|
||||
func (h *RedisHandler) hgetKey(c *fiber.Ctx) error {
|
||||
key := c.Params("key")
|
||||
field := c.Params("field")
|
||||
|
||||
if key == "" || field == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Key and field are required",
|
||||
})
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
val, err := h.redisClient.HGet(ctx, key, field).Result()
|
||||
|
||||
if err == redis.Nil {
|
||||
return c.Status(fiber.StatusNotFound).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Field not found in hash",
|
||||
})
|
||||
} else if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Failed to get hash field: " + err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"key": key,
|
||||
"field": field,
|
||||
"value": val,
|
||||
})
|
||||
}
|
||||
|
||||
// hdelKey deletes fields from a hash stored at key
|
||||
func (h *RedisHandler) hdelKey(c *fiber.Ctx) error {
|
||||
// Parse request
|
||||
var req struct {
|
||||
Key string `json:"key"`
|
||||
Fields []string `json:"fields"`
|
||||
}
|
||||
|
||||
if err := c.BodyParser(&req); err != nil {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Invalid request format: " + err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
// Validate required fields
|
||||
if req.Key == "" || len(req.Fields) == 0 {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Key and at least one field are required",
|
||||
})
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
fields := make([]string, len(req.Fields))
|
||||
copy(fields, req.Fields)
|
||||
|
||||
removed, err := h.redisClient.HDel(ctx, req.Key, fields...).Result()
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Failed to delete hash fields: " + err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"removed": removed,
|
||||
})
|
||||
}
|
||||
|
||||
// hkeysKey retrieves all field names in a hash stored at key
|
||||
func (h *RedisHandler) hkeysKey(c *fiber.Ctx) error {
|
||||
key := c.Params("key")
|
||||
if key == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Key is required",
|
||||
})
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
fields, err := h.redisClient.HKeys(ctx, key).Result()
|
||||
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Failed to get hash keys: " + err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"key": key,
|
||||
"fields": fields,
|
||||
"count": len(fields),
|
||||
})
|
||||
}
|
||||
|
||||
// hgetallKey retrieves all fields and values in a hash stored at key
|
||||
func (h *RedisHandler) hgetallKey(c *fiber.Ctx) error {
|
||||
key := c.Params("key")
|
||||
if key == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Key is required",
|
||||
})
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
values, err := h.redisClient.HGetAll(ctx, key).Result()
|
||||
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"success": false,
|
||||
"error": "Failed to get hash: " + err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"key": key,
|
||||
"hash": values,
|
||||
"count": len(values),
|
||||
})
|
||||
}
|
57
_pkg2_dont_use/heroagent/api/tests/test_utils.go
Normal file
57
_pkg2_dont_use/heroagent/api/tests/test_utils.go
Normal file
@@ -0,0 +1,57 @@
|
||||
package tests
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestSetup represents the common test setup: a fresh Fiber app to mount
// handlers on and a testify assertion helper bound to the current test.
type TestSetup struct {
	// App is the Fiber application under test; register routes on it
	// before calling PerformRequest.
	App *fiber.App
	// Assert wraps *testing.T with testify's assertion methods.
	Assert *assert.Assertions
}
|
||||
|
||||
// NewTestSetup creates a new test setup
|
||||
func NewTestSetup(t *testing.T) *TestSetup {
|
||||
return &TestSetup{
|
||||
App: fiber.New(),
|
||||
Assert: assert.New(t),
|
||||
}
|
||||
}
|
||||
|
||||
// PerformRequest performs an HTTP request against the test app and returns
// the response. A non-nil body is JSON-encoded and sent with a JSON
// content type. The caller is responsible for closing the response body.
// NOTE(review): errors from json.Marshal and App.Test are discarded — a
// marshal failure sends an empty body and a transport failure returns a
// nil response; fixing this would change the method's signature.
func (ts *TestSetup) PerformRequest(method, path string, body interface{}) *http.Response {
	// Convert body to JSON if it's not nil
	var reqBody *bytes.Buffer
	if body != nil {
		jsonBody, _ := json.Marshal(body)
		reqBody = bytes.NewBuffer(jsonBody)
	} else {
		reqBody = bytes.NewBuffer(nil)
	}

	// Create a new HTTP request
	req := httptest.NewRequest(method, path, reqBody)
	req.Header.Set("Content-Type", "application/json")

	// Perform the request
	resp, _ := ts.App.Test(req)
	return resp
}
|
||||
|
||||
// AssertStatusCode asserts that the response has the expected status code,
// failing the current test (non-fatally) with both values when it differs.
func (ts *TestSetup) AssertStatusCode(resp *http.Response, expected int) {
	ts.Assert.Equal(expected, resp.StatusCode, "Expected status code %d but got %d", expected, resp.StatusCode)
}
|
||||
|
||||
// ParseResponseBody decodes the JSON response body into v and closes the
// body. A decode failure is reported as a (non-fatal) assertion error.
func (ts *TestSetup) ParseResponseBody(resp *http.Response, v interface{}) {
	defer resp.Body.Close()
	ts.Assert.NoError(json.NewDecoder(resp.Body).Decode(v), "Failed to parse response body")
}
|
418
_pkg2_dont_use/heroagent/factory.go
Normal file
418
_pkg2_dont_use/heroagent/factory.go
Normal file
@@ -0,0 +1,418 @@
|
||||
package heroagent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/heroagent/api"
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/heroagent/handlers"
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/heroagent/pages"
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/processmanager"
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/sal/executor"
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/servers/redisserver"
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/system/stats"
|
||||
|
||||
// "git.ourworld.tf/herocode/heroagent/pkg/vfs/interfaces"
|
||||
// "git.ourworld.tf/herocode/heroagent/pkg/vfs/interfaces/mock"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/gofiber/fiber/v2/middleware/cors"
|
||||
"github.com/gofiber/fiber/v2/middleware/logger"
|
||||
"github.com/gofiber/fiber/v2/middleware/recover"
|
||||
"github.com/gofiber/template/jet/v2"
|
||||
)
|
||||
|
||||
// Config holds the configuration for the HeroLauncher server.
// Zero values are not useful here; obtain defaults via DefaultConfig.
type Config struct {
	Port            string // HTTP listen port for the Fiber app
	RedisTCPPort    string // TCP port for the embedded Redis server
	RedisSocketPath string // Unix socket path for the embedded Redis server
	TemplatesPath   string // directory containing .jet templates
	StaticFilesPath string // directory served as static assets
	PMSocketPath    string // ProcessManager socket path
	PMSecret        string // ProcessManager authentication secret
	HJSocketPath    string // HeroJobs socket path
}
|
||||
|
||||
// DefaultConfig returns a default configuration for the HeroLauncher server
|
||||
func DefaultConfig() Config {
|
||||
// Get the absolute path to the project root
|
||||
_, filename, _, _ := runtime.Caller(0)
|
||||
projectRoot := filepath.Join(filepath.Dir(filename), "../..")
|
||||
|
||||
// Check for PORT environment variable
|
||||
port := os.Getenv("PORT")
|
||||
if port == "" {
|
||||
port = "9021" // Default port if not specified
|
||||
}
|
||||
|
||||
return Config{
|
||||
Port: port,
|
||||
RedisTCPPort: "6379",
|
||||
RedisSocketPath: "/tmp/heroagent_new.sock",
|
||||
PMSocketPath: "/tmp/processmanager.sock", // Default ProcessManager socket path
|
||||
PMSecret: "1234", // Default ProcessManager secret
|
||||
HJSocketPath: "/tmp/herojobs.sock", // Default HeroJobs socket path
|
||||
TemplatesPath: filepath.Join(projectRoot, "pkg/heroagent/web/templates"),
|
||||
StaticFilesPath: filepath.Join(projectRoot, "pkg/heroagent/web/static"),
|
||||
}
|
||||
}
|
||||
|
||||
// HeroLauncher represents the main application: the HTTP server plus the
// embedded Redis server, executor, and process-manager wiring.
type HeroLauncher struct {
	app             *fiber.App                    // HTTP server and router
	redisServer     *redisserver.Server           // embedded Redis server
	executorService *executor.Executor            // command executor service
	pm              *processmanager.ProcessManager // in-process process manager
	pmProcess       *os.Process                   // Process for the process manager
	hjProcess       *os.Process                   // Process for the HeroJobs server
	// vfsManager   interfaces.VFSManager // VFS manager implementation
	config    Config    // configuration this instance was built with
	startTime time.Time // used by GetUptime
}
|
||||
|
||||
// New creates a new instance of HeroLauncher with the provided configuration.
// It wires up the embedded Redis server, executor, process manager, Jet
// template engine, and the Fiber app (middleware, static files, routes).
// Note: template-path resolution failure is fatal (log.Fatalf exits).
func New(config Config) *HeroLauncher {
	// Initialize modules
	redisServer := redisserver.NewServer(redisserver.ServerConfig{
		TCPPort:        config.RedisTCPPort,
		UnixSocketPath: config.RedisSocketPath,
	})
	executorService := executor.NewExecutor()

	// Initialize process manager directly
	pm := processmanager.NewProcessManager()

	// Set the shared logs path for process manager
	sharedLogsPath := filepath.Join(os.TempDir(), "heroagent_logs")
	pm.SetLogsBasePath(sharedLogsPath)

	// // Initialize VFS manager and client
	// vfsManager := mock.NewMockVFSManager() // Using mock implementation for now

	// Initialize template engine with debugging enabled
	// Use absolute path for templates to avoid path resolution issues
	absTemplatePath, err := filepath.Abs(config.TemplatesPath)
	if err != nil {
		log.Fatalf("Failed to get absolute path for templates: %v", err)
	}

	engine := jet.New(absTemplatePath, ".jet")
	engine.Debug(true) // Enable debug mode to see template errors
	// Reload templates on each render in development
	engine.Reload(true)

	// Initialize Fiber app; unhandled errors become JSON 500 responses.
	app := fiber.New(fiber.Config{
		Views: engine,
		ErrorHandler: func(c *fiber.Ctx, err error) error {
			return c.Status(fiber.StatusInternalServerError).JSON(api.ErrorResponse{
				Error: err.Error(),
			})
		},
	})

	// Middleware
	app.Use(logger.New())
	app.Use(recover.New())
	app.Use(cors.New())

	// Static files - serve all directories with proper paths
	app.Static("/", config.StaticFilesPath)
	app.Static("/css", config.StaticFilesPath+"/css")
	app.Static("/js", config.StaticFilesPath+"/js")
	app.Static("/img", config.StaticFilesPath+"/img")
	app.Static("/favicon.ico", config.StaticFilesPath+"/favicon.ico")

	// Create HeroLauncher instance
	hl := &HeroLauncher{
		app:             app,
		redisServer:     redisServer,
		executorService: executorService,
		pm:              pm,
		// vfsManager:      vfsManager,
		config:    config,
		startTime: time.Now(),
	}

	// Initialize and register route handlers
	hl.setupRoutes()

	return hl
}
|
||||
|
||||
// setupRoutes initializes and registers all route handlers on hl.app.
// Handler construction failures are logged as warnings and the affected
// routes are simply skipped; the server still starts.
func (hl *HeroLauncher) setupRoutes() {
	// Initialize StatsManager; handlers accept nil and degrade gracefully.
	statsManager, err := stats.NewStatsManagerWithDefaults()
	if err != nil {
		log.Printf("Warning: Failed to initialize StatsManager: %v\n", err)
		statsManager = nil
	}

	// Initialize API handlers
	apiAdminHandler := api.NewAdminHandler(hl, statsManager)
	apiServiceHandler := api.NewServiceHandler(hl.config.PMSocketPath, hl.config.PMSecret, log.Default())

	// Initialize Page handlers
	pageAdminHandler := pages.NewAdminHandler(hl, statsManager, hl.config.PMSocketPath, hl.config.PMSecret)
	pageServiceHandler := pages.NewServiceHandler(hl.config.PMSocketPath, hl.config.PMSecret, log.Default())

	// Initialize Jobs page handler
	pageJobHandler, err := pages.NewJobHandler(hl.config.HJSocketPath, log.Default())
	if err != nil {
		log.Printf("Warning: Failed to initialize Jobs page handler: %v\n", err)
	}

	// Initialize JobHandler
	jobHandler, err := handlers.NewJobHandler(hl.config.HJSocketPath, log.Default())
	if err != nil {
		log.Printf("Warning: Failed to initialize JobHandler: %v\n", err)
	} else {
		// Register Job routes
		jobHandler.RegisterRoutes(hl.app)
	}

	// Register API routes
	apiAdminHandler.RegisterRoutes(hl.app)
	apiServiceHandler.RegisterRoutes(hl.app)

	// Register Page routes
	pageAdminHandler.RegisterRoutes(hl.app)
	pageServiceHandler.RegisterRoutes(hl.app)

	// Register Jobs page routes if handler was initialized successfully
	if pageJobHandler != nil {
		pageJobHandler.RegisterRoutes(hl.app)
	}

	// TODO: Move these to appropriate API or pages packages
	executorHandler := api.NewExecutorHandler(hl.executorService)
	//vfsHandler := routesold.NewVFSHandler(hl.vfsClient, log.Default())

	// Create new API handlers
	redisAddr := "localhost:" + hl.config.RedisTCPPort
	redisHandler := api.NewRedisHandler(redisAddr, false)
	jetHandler := api.NewJetHandler()

	// Register legacy routes (to be migrated)
	executorHandler.RegisterRoutes(hl.app)
	//vfsHandler.RegisterRoutes(hl.app)

	// Register new API routes
	redisHandler.RegisterRoutes(hl.app)
	jetHandler.RegisterRoutes(hl.app)
}
|
||||
|
||||
// GetUptime returns the uptime of the HeroLauncher server as a formatted string
|
||||
func (hl *HeroLauncher) GetUptime() string {
|
||||
// Calculate uptime based on the server's start time
|
||||
uptimeDuration := time.Since(hl.startTime)
|
||||
|
||||
// Use more precise calculation for the uptime
|
||||
totalSeconds := int(uptimeDuration.Seconds())
|
||||
days := totalSeconds / (24 * 3600)
|
||||
hours := (totalSeconds % (24 * 3600)) / 3600
|
||||
minutes := (totalSeconds % 3600) / 60
|
||||
seconds := totalSeconds % 60
|
||||
|
||||
// Format the uptime string based on the duration
|
||||
if days > 0 {
|
||||
return fmt.Sprintf("%d days, %d hours", days, hours)
|
||||
} else if hours > 0 {
|
||||
return fmt.Sprintf("%d hours, %d minutes", hours, minutes)
|
||||
} else if minutes > 0 {
|
||||
return fmt.Sprintf("%d minutes, %d seconds", minutes, seconds)
|
||||
} else {
|
||||
return fmt.Sprintf("%d seconds", seconds)
|
||||
}
|
||||
}
|
||||
|
||||
// startProcessManager starts the process manager as a background process
// via `go run`, reusing an existing healthy unix socket when one is found.
// It returns nil once the manager's socket appears, or an error on spawn
// failure / startup timeout.
func (hl *HeroLauncher) startProcessManager() error {
	// Locate the process manager source relative to this file's position
	// in the repository (projectRoot = two directories up).
	_, filename, _, _ := runtime.Caller(0)
	projectRoot := filepath.Join(filepath.Dir(filename), "../..")
	processManagerPath := filepath.Join(projectRoot, "pkg/processmanager/examples/openrpc/main.go")

	log.Printf("Starting process manager from: %s", processManagerPath)

	// Check if processmanager is already running by testing the socket
	if _, err := os.Stat(hl.config.PMSocketPath); err == nil {
		// Try to connect to the socket to verify it's working
		conn, err := net.Dial("unix", hl.config.PMSocketPath)
		if err == nil {
			// Socket is valid and we can connect to it
			conn.Close()
			log.Printf("Found existing process manager socket, using it instead of starting a new one")
			return nil
		}

		// If socket exists but we can't connect, assume it's stale
		log.Printf("Found existing socket, but can't connect to it: %v", err)
		log.Printf("Removing stale socket and starting a new process manager")
		_ = os.Remove(hl.config.PMSocketPath)
	}

	// Define shared logs path
	sharedLogsPath := filepath.Join(os.TempDir(), "heroagent_logs")

	// Ensure the logs directory exists
	if err := os.MkdirAll(sharedLogsPath, 0755); err != nil {
		log.Printf("Warning: Failed to create logs directory: %v", err)
	}

	// Start the process manager with the shared logs path.
	// NOTE(review): `go run` compiles and then spawns the real binary as
	// a child; hl.pmProcess therefore holds the `go run` wrapper's PID,
	// so Kill() may not stop the manager itself — confirm on target OS.
	cmd := exec.Command("go", "run", processManagerPath,
		"-socket", hl.config.PMSocketPath,
		"-secret", hl.config.PMSecret,
		"-logs", sharedLogsPath)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	err := cmd.Start()
	if err != nil {
		return fmt.Errorf("failed to start process manager: %v", err)
	}

	hl.pmProcess = cmd.Process
	log.Printf("Started process manager with PID: %d", cmd.Process.Pid)

	// Wait for the process manager to start up: poll for the socket
	// every 100ms, giving up after 5 seconds.
	timeout := time.After(5 * time.Second)
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			// Check if the socket exists
			if _, err := os.Stat(hl.config.PMSocketPath); err == nil {
				// If socket exists, assume process manager is running
				log.Printf("Process manager is up and running")
				return nil
			}
		case <-timeout:
			return fmt.Errorf("timeout waiting for process manager to start")
		}
	}
}
|
||||
|
||||
// startHeroJobs starts the HeroJobs server as a background process
// via `go run`, reusing an existing healthy unix socket when one is
// found. It returns nil once the HeroJobs socket appears, or an error
// on spawn failure / startup timeout.
func (hl *HeroLauncher) startHeroJobs() error {
	// Locate the HeroJobs entry point relative to this source file.
	_, filename, _, _ := runtime.Caller(0)
	projectRoot := filepath.Join(filepath.Dir(filename), "../..")
	heroJobsPath := filepath.Join(projectRoot, "cmd/herojobs/main.go")

	log.Printf("Starting HeroJobs from: %s", heroJobsPath)

	// Check if HeroJobs is already running by testing the socket
	if _, err := os.Stat(hl.config.HJSocketPath); err == nil {
		// Try to connect to the socket to verify it's working
		conn, err := net.Dial("unix", hl.config.HJSocketPath)
		if err == nil {
			// Socket is valid and we can connect to it
			conn.Close()
			log.Printf("Found existing HeroJobs socket, using it instead of starting a new one")
			return nil
		}

		// If socket exists but we can't connect, assume it's stale
		log.Printf("Found existing HeroJobs socket, but can't connect to it: %v", err)
		log.Printf("Removing stale socket and starting a new HeroJobs server")
		_ = os.Remove(hl.config.HJSocketPath)
	}

	// Define shared logs path
	sharedLogsPath := filepath.Join(os.TempDir(), "heroagent_logs/jobs")

	// Ensure the logs directory exists
	if err := os.MkdirAll(sharedLogsPath, 0755); err != nil {
		log.Printf("Warning: Failed to create logs directory: %v", err)
	}

	// Start HeroJobs with the shared logs path.
	// NOTE(review): as with startProcessManager, `go run` spawns the real
	// binary as a child; hl.hjProcess holds the wrapper's PID, so Kill()
	// may not stop the server itself — confirm on target OS.
	cmd := exec.Command("go", "run", heroJobsPath,
		"-socket", hl.config.HJSocketPath,
		"-logs", sharedLogsPath)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	err := cmd.Start()
	if err != nil {
		return fmt.Errorf("failed to start HeroJobs: %v", err)
	}

	// Store the process reference for graceful shutdown
	hl.hjProcess = cmd.Process
	log.Printf("Started HeroJobs with PID: %d", cmd.Process.Pid)

	// Wait for HeroJobs to start up: poll for the socket every 100ms,
	// giving up after 5 seconds.
	timeout := time.After(5 * time.Second)
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			// Check if the socket exists
			if _, err := os.Stat(hl.config.HJSocketPath); err == nil {
				// If socket exists, assume HeroJobs is running
				log.Printf("HeroJobs is up and running")
				return nil
			}
		case <-timeout:
			return fmt.Errorf("timeout waiting for HeroJobs to start")
		}
	}
}
|
||||
|
||||
// Start starts the HeroLauncher server: it launches the process manager
// and HeroJobs as background processes (best effort — failures only log
// warnings), installs a signal handler for graceful shutdown, and then
// blocks serving HTTP until Shutdown or a listener error.
func (hl *HeroLauncher) Start() error {
	// Start the process manager first
	err := hl.startProcessManager()
	if err != nil {
		log.Printf("Warning: Failed to start process manager: %v", err)
		// Continue anyway, we'll just show warnings in the UI
	}

	// Start HeroJobs
	err = hl.startHeroJobs()
	if err != nil {
		log.Printf("Warning: Failed to start HeroJobs: %v", err)
		// Continue anyway, we'll just show warnings in the UI
	}

	// Setup graceful shutdown on interrupt (Ctrl-C) or SIGTERM
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)

	go func() {
		<-c
		log.Println("Shutting down server...")

		// Kill the process manager if we started it
		if hl.pmProcess != nil {
			log.Println("Stopping process manager...")
			_ = hl.pmProcess.Kill()
		}

		// Kill the HeroJobs server if we started it
		if hl.hjProcess != nil {
			log.Println("Stopping HeroJobs server...")
			_ = hl.hjProcess.Kill()
		}

		// Shut down the Fiber app; this unblocks Listen below.
		_ = hl.app.Shutdown()
	}()

	// Start server (blocking)
	log.Printf("Starting server on :%s", hl.config.Port)
	return hl.app.Listen(":" + hl.config.Port)
}
|
487
_pkg2_dont_use/heroagent/handlers/job_handlers.go
Normal file
487
_pkg2_dont_use/heroagent/handlers/job_handlers.go
Normal file
@@ -0,0 +1,487 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv" // Added strconv for JobID parsing
|
||||
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/herojobs"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
// RedisClientInterface defines the methods JobHandler needs from a HeroJobs Redis client.
// It exists primarily as a test seam: *herojobs.RedisClient satisfies it in
// production, while tests substitute a mock.
type RedisClientInterface interface {
	// StoreJob persists the job record in Redis.
	StoreJob(job *herojobs.Job) error
	// EnqueueJob pushes the job onto its processing queue.
	EnqueueJob(job *herojobs.Job) error
	GetJob(jobID interface{}) (*herojobs.Job, error) // Changed jobID type to interface{}
	// ListJobs returns the IDs of queued jobs for a circle/topic pair.
	ListJobs(circleID, topic string) ([]uint32, error)
	// QueueSize reports how many jobs are waiting for a circle/topic pair.
	QueueSize(circleID, topic string) (int64, error)
	// QueueEmpty clears the queue for a circle/topic pair.
	QueueEmpty(circleID, topic string) error
	// herojobs.Job also has Load() and Save() methods, but those are on the Job object itself,
	// not typically part of the client interface unless the client is a facade for all job operations.
}
|
||||
|
||||
// JobHandler handles job-related routes
// (submit/get/delete/list plus queue inspection endpoints).
type JobHandler struct {
	client RedisClientInterface // Changed to use the interface
	logger *log.Logger          // destination for handler error logging
}
|
||||
|
||||
// NewJobHandler creates a new JobHandler
// backed by a HeroJobs Redis client; it returns an error when the
// client cannot be constructed.
// NOTE(review): the parameter is named redisAddr, but the call site in
// this repo passes hl.config.HJSocketPath (a unix socket path) — confirm
// what herojobs.NewRedisClient actually expects.
func NewJobHandler(redisAddr string, logger *log.Logger) (*JobHandler, error) {
	redisClient, err := herojobs.NewRedisClient(redisAddr, false)
	if err != nil {
		return nil, fmt.Errorf("failed to create HeroJobs Redis client: %w", err)
	}
	// *herojobs.RedisClient must implement RedisClientInterface.
	// This assignment is valid if *herojobs.RedisClient has all methods of RedisClientInterface.
	return &JobHandler{
		client: redisClient,
		logger: logger,
	}, nil
}
|
||||
|
||||
// RegisterRoutes registers job API routes
|
||||
func (h *JobHandler) RegisterRoutes(app *fiber.App) {
|
||||
// Register common routes to both API and admin groups
|
||||
jobRoutes := func(group fiber.Router) {
|
||||
group.Post("/submit", h.submitJob)
|
||||
group.Get("/get/:id", h.getJob)
|
||||
group.Delete("/delete/:id", h.deleteJob)
|
||||
group.Get("/list", h.listJobs)
|
||||
group.Get("/queue/size", h.queueSize)
|
||||
group.Post("/queue/empty", h.queueEmpty)
|
||||
group.Get("/queue/get", h.queueGet)
|
||||
group.Post("/create", h.createJob)
|
||||
}
|
||||
|
||||
// Apply common routes to API group
|
||||
apiJobs := app.Group("/api/jobs")
|
||||
jobRoutes(apiJobs)
|
||||
|
||||
// Apply common routes to admin group
|
||||
adminJobs := app.Group("/admin/jobs")
|
||||
jobRoutes(adminJobs)
|
||||
}
|
||||
|
||||
// @Summary Submit a job
// @Description Submit a new job to the HeroJobs server
// @Tags jobs
// @Accept json
// @Produce json
// @Param job body herojobs.Job true "Job to submit"
// @Success 200 {object} herojobs.Job
// @Failure 400 {object} map[string]string
// @Failure 500 {object} map[string]string
// @Router /api/jobs/submit [post]
// @Router /admin/jobs/submit [post]
// submitJob accepts a fully-formed job in the request body, saves it to
// OurDB (assigning/confirming its JobID), then stores and enqueues it in
// Redis. The persisted job is echoed back as JSON.
func (h *JobHandler) submitJob(c *fiber.Ctx) error {
	// Parse job from request body
	var job herojobs.Job
	if err := c.BodyParser(&job); err != nil {
		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
			"error": fmt.Sprintf("Failed to parse job data: %v", err),
		})
	}

	// Save job to OurDB (this assigns/confirms JobID)
	if err := job.Save(); err != nil {
		h.logger.Printf("Failed to save job to OurDB: %v", err)
		return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
			"error": fmt.Sprintf("Failed to save job: %v", err),
		})
	}

	// Store job in Redis. NOTE(review): at this point the job is already
	// in OurDB; a Redis failure leaves the two stores inconsistent.
	if err := h.client.StoreJob(&job); err != nil {
		h.logger.Printf("Failed to store job in Redis: %v", err)
		// Attempt to roll back or log, but proceed to enqueue if critical
		return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
			"error": fmt.Sprintf("Failed to store job in Redis: %v", err),
		})
	}

	// Enqueue job in Redis
	if err := h.client.EnqueueJob(&job); err != nil {
		h.logger.Printf("Failed to enqueue job in Redis: %v", err)
		return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
			"error": fmt.Sprintf("Failed to enqueue job: %v", err),
		})
	}

	return c.JSON(job)
}
|
||||
|
||||
// @Summary Get a job
// @Description Get a job by ID
// @Tags jobs
// @Produce json
// @Param id path string true "Job ID"
// @Success 200 {object} herojobs.Job
// @Failure 400 {object} map[string]string
// @Failure 500 {object} map[string]string
// @Router /api/jobs/get/{id} [get]
// @Router /admin/jobs/get/{id} [get]
// getJob looks the job up in Redis first and falls back to loading it
// from OurDB when the Redis lookup fails.
func (h *JobHandler) getJob(c *fiber.Ctx) error {
	// Get job ID from path parameter
	jobIDStr := c.Params("id")
	if jobIDStr == "" {
		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
			"error": "Job ID is required",
		})
	}

	// Convert jobID string to uint32
	jobID64, err := strconv.ParseUint(jobIDStr, 10, 32)
	if err != nil {
		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
			"error": fmt.Sprintf("Invalid Job ID format: %s. %v", jobIDStr, err),
		})
	}
	jobID := uint32(jobID64)

	// Get job from Redis first
	job, err := h.client.GetJob(jobID)
	if err != nil {
		// If not found in Redis (e.g. redis.Nil or other error), try OurDB
		h.logger.Printf("Job %d not found in Redis or error: %v. Trying OurDB.", jobID, err)
		// NOTE(review): queueGet sets CircleID/Topic before calling Load
		// because "the path depends on it"; here only JobID is set —
		// confirm Load() can resolve a job without them.
		retrievedJob := &herojobs.Job{JobID: jobID}
		if loadErr := retrievedJob.Load(); loadErr != nil {
			h.logger.Printf("Failed to load job %d from OurDB: %v", jobID, loadErr)
			return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
				"error": fmt.Sprintf("Failed to get job %d: %v / %v", jobID, err, loadErr),
			})
		}
		job = retrievedJob // Use the job loaded from OurDB
	}

	return c.JSON(job)
}
|
||||
|
||||
// @Summary Delete a job
|
||||
// @Description Delete a job by ID
|
||||
// @Tags jobs
|
||||
// @Produce json
|
||||
// @Param id path string true "Job ID"
|
||||
// @Success 200 {object} map[string]string
|
||||
// @Failure 400 {object} map[string]string
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /api/jobs/delete/{id} [delete]
|
||||
// @Router /admin/jobs/delete/{id} [delete]
|
||||
func (h *JobHandler) deleteJob(c *fiber.Ctx) error {
|
||||
// Get job ID from path parameter
|
||||
jobIDStr := c.Params("id")
|
||||
if jobIDStr == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Job ID is required",
|
||||
})
|
||||
}
|
||||
|
||||
// Deleting jobs requires removing from OurDB and Redis.
|
||||
// This functionality is not directly provided by RedisClient.DeleteJob
|
||||
// and OurDB job deletion is not specified in README.
|
||||
// For now, returning not implemented.
|
||||
h.logger.Printf("Attempt to delete job %s - not implemented", jobIDStr)
|
||||
return c.Status(fiber.StatusNotImplemented).JSON(fiber.Map{
|
||||
"error": "Job deletion is not implemented",
|
||||
"message": fmt.Sprintf("Job %s deletion requested but not implemented.", jobIDStr),
|
||||
})
|
||||
}
|
||||
|
||||
// @Summary List jobs
|
||||
// @Description List jobs by circle ID and topic
|
||||
// @Tags jobs
|
||||
// @Produce json
|
||||
// @Param circleid query string true "Circle ID"
|
||||
// @Param topic query string true "Topic"
|
||||
// @Success 200 {object} map[string][]string
|
||||
// @Failure 400 {object} map[string]string
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /api/jobs/list [get]
|
||||
// @Router /admin/jobs/list [get]
|
||||
func (h *JobHandler) listJobs(c *fiber.Ctx) error {
|
||||
// Get parameters from query
|
||||
circleID := c.Query("circleid")
|
||||
if circleID == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Circle ID is required",
|
||||
})
|
||||
}
|
||||
|
||||
topic := c.Query("topic")
|
||||
if topic == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Topic is required",
|
||||
})
|
||||
}
|
||||
|
||||
// List jobs
|
||||
jobs, err := h.client.ListJobs(circleID, topic)
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to list jobs: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"status": "success",
|
||||
"jobs": jobs,
|
||||
})
|
||||
}
|
||||
|
||||
// @Summary Get queue size
|
||||
// @Description Get the size of a job queue by circle ID and topic
|
||||
// @Tags jobs
|
||||
// @Produce json
|
||||
// @Param circleid query string true "Circle ID"
|
||||
// @Param topic query string true "Topic"
|
||||
// @Success 200 {object} map[string]int64
|
||||
// @Failure 400 {object} map[string]string
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /api/jobs/queue/size [get]
|
||||
// @Router /admin/jobs/queue/size [get]
|
||||
func (h *JobHandler) queueSize(c *fiber.Ctx) error {
|
||||
// Get parameters from query
|
||||
circleID := c.Query("circleid")
|
||||
if circleID == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Circle ID is required",
|
||||
})
|
||||
}
|
||||
|
||||
topic := c.Query("topic")
|
||||
if topic == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Topic is required",
|
||||
})
|
||||
}
|
||||
|
||||
// Get queue size
|
||||
size, err := h.client.QueueSize(circleID, topic)
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to get queue size: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"status": "success",
|
||||
"size": size,
|
||||
})
|
||||
}
|
||||
|
||||
// @Summary Empty queue
// @Description Empty a job queue by circle ID and topic
// @Tags jobs
// @Accept json
// @Produce json
// @Param body body object true "Queue parameters"
// @Success 200 {object} map[string]string
// @Failure 400 {object} map[string]string
// @Failure 500 {object} map[string]string
// @Router /api/jobs/queue/empty [post]
// @Router /admin/jobs/queue/empty [post]
// queueEmpty clears the queue identified by the circleid/topic pair in
// the JSON request body.
func (h *JobHandler) queueEmpty(c *fiber.Ctx) error {
	// Parse parameters from request body
	var params struct {
		CircleID string `json:"circleid"`
		Topic    string `json:"topic"`
	}
	if err := c.BodyParser(&params); err != nil {
		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
			"error": fmt.Sprintf("Failed to parse parameters: %v", err),
		})
	}

	// Both fields are mandatory.
	if params.CircleID == "" {
		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
			"error": "Circle ID is required",
		})
	}

	if params.Topic == "" {
		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
			"error": "Topic is required",
		})
	}

	// Empty queue
	if err := h.client.QueueEmpty(params.CircleID, params.Topic); err != nil {
		return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
			"error": fmt.Sprintf("Failed to empty queue: %v", err),
		})
	}

	return c.JSON(fiber.Map{
		"status":  "success",
		"message": fmt.Sprintf("Queue for circle %s and topic %s emptied successfully", params.CircleID, params.Topic),
	})
}
|
||||
|
||||
// @Summary Get job from queue
// @Description Get a job from a queue without removing it
// @Tags jobs
// @Produce json
// @Param circleid query string true "Circle ID"
// @Param topic query string true "Topic"
// @Success 200 {object} herojobs.Job
// @Failure 400 {object} map[string]string
// @Failure 500 {object} map[string]string
// @Router /api/jobs/queue/get [get]
// @Router /admin/jobs/queue/get [get]
// queueGet peeks at the head of a queue: it lists the queued job IDs
// without dequeuing, fetches the first one from Redis, and falls back
// to OurDB when the Redis lookup fails.
func (h *JobHandler) queueGet(c *fiber.Ctx) error {
	// Get parameters from query
	circleID := c.Query("circleid")
	if circleID == "" {
		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
			"error": "Circle ID is required",
		})
	}

	topic := c.Query("topic")
	if topic == "" {
		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
			"error": "Topic is required",
		})
	}

	// Get list of job IDs (uint32) from the queue (non-destructive)
	jobIDs, err := h.client.ListJobs(circleID, topic)
	if err != nil {
		return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
			"error": fmt.Sprintf("Failed to list jobs in queue: %v", err),
		})
	}

	// An empty queue is reported as 404 rather than an empty body.
	if len(jobIDs) == 0 {
		return c.Status(fiber.StatusNotFound).JSON(fiber.Map{
			"error": "Queue is empty or no jobs found",
		})
	}

	// Take the first job ID from the list (it's already uint32)
	jobIDToFetch := jobIDs[0]

	// Get the actual job details using the ID
	job, err := h.client.GetJob(jobIDToFetch)
	if err != nil {
		// If not found in Redis (e.g. redis.Nil or other error), try OurDB
		h.logger.Printf("Job %d (from queue list) not found in Redis or error: %v. Trying OurDB.", jobIDToFetch, err)
		retrievedJob := &herojobs.Job{JobID: jobIDToFetch} // Ensure CircleID and Topic are set if Load needs them
		retrievedJob.CircleID = circleID                   // Needed for Load if path depends on it
		retrievedJob.Topic = topic                         // Needed for Load if path depends on it
		if loadErr := retrievedJob.Load(); loadErr != nil {
			h.logger.Printf("Failed to load job %d from OurDB: %v", jobIDToFetch, loadErr)
			return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
				"error": fmt.Sprintf("Failed to get job %d from queue (Redis err: %v / OurDB err: %v)", jobIDToFetch, err, loadErr),
			})
		}
		job = retrievedJob // Use the job loaded from OurDB
	}

	return c.JSON(job)
}
|
||||
|
||||
// @Summary Create job
|
||||
// @Description Create a new job with the given parameters
|
||||
// @Tags jobs
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param body body object true "Job parameters"
|
||||
// @Success 200 {object} herojobs.Job
|
||||
// @Failure 400 {object} map[string]string
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /api/jobs/create [post]
|
||||
// @Router /admin/jobs/create [post]
|
||||
func (h *JobHandler) createJob(c *fiber.Ctx) error {
|
||||
// Parse parameters from request body
|
||||
var reqBody struct {
|
||||
CircleID string `json:"circleid"`
|
||||
Topic string `json:"topic"`
|
||||
SessionKey string `json:"sessionkey"`
|
||||
Params string `json:"params"`
|
||||
ParamsType string `json:"paramstype"`
|
||||
Timeout int64 `json:"timeout"` // Optional: allow timeout override
|
||||
Log bool `json:"log"` // Optional: allow log enabling
|
||||
}
|
||||
if err := c.BodyParser(&reqBody); err != nil {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to parse parameters: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
if reqBody.CircleID == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Circle ID is required",
|
||||
})
|
||||
}
|
||||
if reqBody.Topic == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Topic is required",
|
||||
})
|
||||
}
|
||||
if reqBody.Params == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Params are required",
|
||||
})
|
||||
}
|
||||
if reqBody.ParamsType == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "ParamsType is required",
|
||||
})
|
||||
}
|
||||
|
||||
// Create a new job instance
|
||||
job := herojobs.NewJob() // Initializes with defaults
|
||||
job.CircleID = reqBody.CircleID
|
||||
job.Topic = reqBody.Topic
|
||||
job.SessionKey = reqBody.SessionKey
|
||||
job.Params = reqBody.Params
|
||||
|
||||
// Convert ParamsType string to herojobs.ParamsType
|
||||
switch herojobs.ParamsType(reqBody.ParamsType) {
|
||||
case herojobs.ParamsTypeHeroScript:
|
||||
job.ParamsType = herojobs.ParamsTypeHeroScript
|
||||
case herojobs.ParamsTypeRhaiScript:
|
||||
job.ParamsType = herojobs.ParamsTypeRhaiScript
|
||||
case herojobs.ParamsTypeOpenRPC:
|
||||
job.ParamsType = herojobs.ParamsTypeOpenRPC
|
||||
case herojobs.ParamsTypeAI:
|
||||
job.ParamsType = herojobs.ParamsTypeAI
|
||||
default:
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Invalid ParamsType: %s", reqBody.ParamsType),
|
||||
})
|
||||
}
|
||||
|
||||
if reqBody.Timeout > 0 {
|
||||
job.Timeout = reqBody.Timeout
|
||||
}
|
||||
job.Log = reqBody.Log
|
||||
|
||||
// Save job to OurDB (this assigns JobID)
|
||||
if err := job.Save(); err != nil {
|
||||
h.logger.Printf("Failed to save new job to OurDB: %v", err)
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to save new job: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
// Store job in Redis
|
||||
if err := h.client.StoreJob(job); err != nil {
|
||||
h.logger.Printf("Failed to store new job in Redis: %v", err)
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to store new job in Redis: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
// Enqueue job in Redis
|
||||
if err := h.client.EnqueueJob(job); err != nil {
|
||||
h.logger.Printf("Failed to enqueue new job in Redis: %v", err)
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to enqueue new job: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(job)
|
||||
}
|
572
_pkg2_dont_use/heroagent/handlers/job_handlers_test.go
Normal file
572
_pkg2_dont_use/heroagent/handlers/job_handlers_test.go
Normal file
@@ -0,0 +1,572 @@
|
||||
package handlers
|
||||
|
||||
import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"net/http"
	"net/http/httptest"
	"testing"

	"git.ourworld.tf/herocode/heroagent/pkg/herojobs"
	"github.com/gofiber/fiber/v2"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)
|
||||
|
||||
// MockRedisClient is a mock implementation of the RedisClientInterface
// used by the handler tests; expectations are configured per test via
// testify's mock package.
type MockRedisClient struct {
	mock.Mock
}

// StoreJob mocks the StoreJob method
func (m *MockRedisClient) StoreJob(job *herojobs.Job) error {
	args := m.Called(job)
	return args.Error(0)
}

// EnqueueJob mocks the EnqueueJob method
func (m *MockRedisClient) EnqueueJob(job *herojobs.Job) error {
	args := m.Called(job)
	return args.Error(0)
}

// GetJob mocks the GetJob method
func (m *MockRedisClient) GetJob(jobID interface{}) (*herojobs.Job, error) { // jobID is interface{}
	args := m.Called(jobID)
	// A nil first return means the test configured an error-only result.
	if args.Get(0) == nil {
		return nil, args.Error(1)
	}
	return args.Get(0).(*herojobs.Job), args.Error(1)
}

// ListJobs mocks the ListJobs method
func (m *MockRedisClient) ListJobs(circleID, topic string) ([]uint32, error) { // Returns []uint32
	args := m.Called(circleID, topic)
	if args.Get(0) == nil {
		return nil, args.Error(1)
	}
	return args.Get(0).([]uint32), args.Error(1)
}

// QueueSize mocks the QueueSize method
func (m *MockRedisClient) QueueSize(circleID, topic string) (int64, error) {
	args := m.Called(circleID, topic)
	// Ensure Get(0) is not nil before type assertion if it can be nil in some error cases
	if args.Get(0) == nil && args.Error(1) != nil { // If error is set, result might be nil
		return 0, args.Error(1)
	}
	return args.Get(0).(int64), args.Error(1)
}

// QueueEmpty mocks the QueueEmpty method
func (m *MockRedisClient) QueueEmpty(circleID, topic string) error {
	args := m.Called(circleID, topic)
	return args.Error(0)
}
|
||||
|
||||
// setupTest initializes a test environment with a mock client
|
||||
func setupTest() (*JobHandler, *MockRedisClient, *fiber.App) {
|
||||
mockClient := new(MockRedisClient)
|
||||
handler := &JobHandler{
|
||||
client: mockClient, // Assign the mock that implements RedisClientInterface
|
||||
}
|
||||
|
||||
app := fiber.New()
|
||||
|
||||
// Register routes (ensure these match the actual routes in job_handlers.go)
|
||||
apiJobs := app.Group("/api/jobs") // Assuming routes are under /api/jobs
|
||||
apiJobs.Post("/submit", handler.submitJob)
|
||||
apiJobs.Get("/get/:id", handler.getJob) // :id as per job_handlers.go
|
||||
apiJobs.Delete("/delete/:id", handler.deleteJob) // :id as per job_handlers.go
|
||||
apiJobs.Get("/list", handler.listJobs)
|
||||
apiJobs.Get("/queue/size", handler.queueSize)
|
||||
apiJobs.Post("/queue/empty", handler.queueEmpty)
|
||||
apiJobs.Get("/queue/get", handler.queueGet)
|
||||
apiJobs.Post("/create", handler.createJob)
|
||||
|
||||
// If admin routes are also tested, they need to be registered here too
|
||||
// adminJobs := app.Group("/admin/jobs")
|
||||
// jobRoutes(adminJobs) // if using the same handler instance
|
||||
|
||||
return handler, mockClient, app
|
||||
}
|
||||
|
||||
// createTestRequest builds an httptest request for the given method and
// path with a JSON content type. The error result is always nil; it is
// kept so call sites read like net/http constructors.
func createTestRequest(method, path string, body io.Reader) (*http.Request, error) {
	r := httptest.NewRequest(method, path, body)
	r.Header.Set("Content-Type", "application/json")
	return r, nil
}
|
||||
|
||||
// TestQueueEmpty tests the queueEmpty handler
// using a table of cases: each one posts a JSON body and checks both
// the HTTP status code and the exact JSON response.
func TestQueueEmpty(t *testing.T) {
	// Test cases
	tests := []struct {
		name           string
		circleID       string
		topic          string
		emptyError     error // error the mocked QueueEmpty should return
		expectedStatus int
		expectedBody   string
	}{
		{
			name:           "Success",
			circleID:       "test-circle",
			topic:          "test-topic",
			emptyError:     nil,
			expectedStatus: fiber.StatusOK,
			expectedBody:   `{"status":"success","message":"Queue for circle test-circle and topic test-topic emptied successfully"}`,
		},
		// Removed "Connection Error" test case as Connect is no longer directly called per op
		{
			name:           "Empty Error",
			circleID:       "test-circle",
			topic:          "test-topic",
			emptyError:     errors.New("empty error"),
			expectedStatus: fiber.StatusInternalServerError,
			expectedBody:   `{"error":"Failed to empty queue: empty error"}`,
		},
		{
			name:           "Empty Circle ID",
			circleID:       "",
			topic:          "test-topic",
			emptyError:     nil,
			expectedStatus: fiber.StatusBadRequest,
			expectedBody:   `{"error":"Circle ID is required"}`,
		},
		{
			name:           "Empty Topic",
			circleID:       "test-circle",
			topic:          "",
			emptyError:     nil,
			expectedStatus: fiber.StatusBadRequest,
			expectedBody:   `{"error":"Topic is required"}`,
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			// Create a new mock client for each test and setup app
			_, mockClient, app := setupTest() // Use setupTest to get handler with mock

			// Setup mock expectations
			if tc.circleID != "" && tc.topic != "" { // Only expect call if params are valid
				mockClient.On("QueueEmpty", tc.circleID, tc.topic).Return(tc.emptyError)
			}

			// Create request body
			reqBody := map[string]string{
				"circleid": tc.circleID,
				"topic":    tc.topic,
			}
			reqBodyBytes, err := json.Marshal(reqBody)
			assert.NoError(t, err)

			// Create test request
			req, err := createTestRequest(http.MethodPost, "/api/jobs/queue/empty", bytes.NewReader(reqBodyBytes))
			assert.NoError(t, err)
			req.Header.Set("Content-Type", "application/json")

			// Perform the request
			resp, err := app.Test(req)
			assert.NoError(t, err)

			// Check status code
			assert.Equal(t, tc.expectedStatus, resp.StatusCode)

			// Check response body
			body, err := io.ReadAll(resp.Body)
			assert.NoError(t, err)
			assert.JSONEq(t, tc.expectedBody, string(body))

			// Verify that all expectations were met
			mockClient.AssertExpectations(t)
		})
	}
}
|
||||
|
||||
// TestQueueGet tests the queueGet handler.
//
// The handler lists job IDs for a circle/topic queue and then fetches the
// first job. Each table entry drives the mocked ListJobs/GetJob results and
// asserts the resulting HTTP status and JSON body.
func TestQueueGet(t *testing.T) {
	// Create a test job shared by the success case.
	testJob := herojobs.NewJob()
	testJob.JobID = 10 // This will be a number in JSON
	testJob.CircleID = "test-circle"
	testJob.Topic = "test-topic"
	testJob.Params = "some script"
	testJob.ParamsType = herojobs.ParamsTypeHeroScript
	testJob.Status = herojobs.JobStatusNew

	// Test cases
	tests := []struct {
		name           string
		circleID       string
		topic          string
		listJobsError  error
		listJobsResp   []uint32
		getJobError    error
		getJobResp     *herojobs.Job
		expectedStatus int
		expectedBody   string // This will need to be updated to match the actual job structure
	}{
		{
			name:           "Success",
			circleID:       "test-circle",
			topic:          "test-topic",
			listJobsError:  nil,
			listJobsResp:   []uint32{10},
			getJobError:    nil,
			getJobResp:     testJob,
			expectedStatus: fiber.StatusOK,
			expectedBody:   `{"jobid":10,"circleid":"test-circle","topic":"test-topic","params":"some script","paramstype":"HeroScript","status":"new","sessionkey":"","result":"","error":"","timeout":60,"log":false,"timescheduled":0,"timestart":0,"timeend":0}`,
		},
		// Removed "Connection Error"
		{
			name:           "ListJobs Error",
			circleID:       "test-circle",
			topic:          "test-topic",
			listJobsError:  errors.New("list error"),
			listJobsResp:   nil,
			getJobError:    nil, // Not reached
			getJobResp:     nil, // Not reached
			expectedStatus: fiber.StatusInternalServerError,
			expectedBody:   `{"error":"Failed to list jobs in queue: list error"}`,
		},
		{
			name:           "GetJob Error after ListJobs success",
			circleID:       "test-circle",
			topic:          "test-topic",
			listJobsError:  nil,
			listJobsResp:   []uint32{10},
			getJobError:    errors.New("get error"),
			getJobResp:     nil,
			expectedStatus: fiber.StatusInternalServerError, // Or based on how GetJob error is handled (e.g. fallback to OurDB)
			// The error message might be more complex if OurDB load is also attempted and fails
			expectedBody: `{"error":"Failed to get job 10 from queue (Redis err: get error / OurDB err: record not found)"}`, // Adjusted expected error
		},
		{
			name:           "Queue Empty (ListJobs returns empty)",
			circleID:       "test-circle",
			topic:          "test-topic",
			listJobsError:  nil,
			listJobsResp:   []uint32{}, // Empty list
			getJobError:    nil,
			getJobResp:     nil,
			expectedStatus: fiber.StatusNotFound,
			expectedBody:   `{"error":"Queue is empty or no jobs found"}`,
		},
		{
			name:           "Empty Circle ID",
			circleID:       "",
			topic:          "test-topic",
			listJobsError:  nil,
			listJobsResp:   nil,
			getJobError:    nil,
			getJobResp:     nil,
			expectedStatus: fiber.StatusBadRequest,
			expectedBody:   `{"error":"Circle ID is required"}`,
		},
		{
			name:           "Empty Topic",
			circleID:       "test-circle",
			topic:          "",
			listJobsError:  nil,
			listJobsResp:   nil,
			getJobError:    nil,
			getJobResp:     nil,
			expectedStatus: fiber.StatusBadRequest,
			expectedBody:   `{"error":"Topic is required"}`,
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			// Create a new mock client for each test and setup app
			_, mockClient, app := setupTest()

			// Setup mock expectations: calls only happen once both query
			// parameters pass validation in the handler.
			if tc.circleID != "" && tc.topic != "" {
				mockClient.On("ListJobs", tc.circleID, tc.topic).Return(tc.listJobsResp, tc.listJobsError)
				if tc.listJobsError == nil && len(tc.listJobsResp) > 0 {
					// Expect GetJob to be called with the first ID from listJobsResp
					// The handler passes uint32 to client.GetJob, which matches interface{}
					mockClient.On("GetJob", tc.listJobsResp[0]).Return(tc.getJobResp, tc.getJobError).Maybe()
					// If GetJob from Redis fails, a Load from OurDB is attempted.
					// We are not mocking job.Load() here as it's on the job object.
					// The error message in the test case reflects this potential dual failure.
				}
			}

			// Create test request
			path := fmt.Sprintf("/api/jobs/queue/get?circleid=%s&topic=%s", tc.circleID, tc.topic)
			req, err := createTestRequest(http.MethodGet, path, nil)
			assert.NoError(t, err)

			// Perform the request
			resp, err := app.Test(req)
			assert.NoError(t, err)

			// Check status code
			assert.Equal(t, tc.expectedStatus, resp.StatusCode)

			// Check response body
			body, err := io.ReadAll(resp.Body)
			assert.NoError(t, err)
			assert.JSONEq(t, tc.expectedBody, string(body))

			// Verify that all expectations were met
			mockClient.AssertExpectations(t)
		})
	}
}
|
||||
|
||||
// TestCreateJob tests the createJob handler
|
||||
func TestCreateJob(t *testing.T) {
|
||||
// Test cases
|
||||
createdJob := herojobs.NewJob()
|
||||
createdJob.JobID = 10 // Assuming Save will populate this; for mock, we set it
|
||||
createdJob.CircleID = "test-circle"
|
||||
createdJob.Topic = "test-topic"
|
||||
createdJob.SessionKey = "test-key"
|
||||
createdJob.Params = "test-params"
|
||||
createdJob.ParamsType = herojobs.ParamsTypeHeroScript // Match "HeroScript" string
|
||||
createdJob.Status = herojobs.JobStatusNew // Default status after NewJob and Save
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
reqBody map[string]interface{} // Use map for flexibility
|
||||
storeError error
|
||||
enqueueError error
|
||||
expectedStatus int
|
||||
expectedBody string // Will be the createdJob marshaled
|
||||
}{
|
||||
{
|
||||
name: "Success",
|
||||
reqBody: map[string]interface{}{
|
||||
"circleid": "test-circle",
|
||||
"topic": "test-topic",
|
||||
"sessionkey": "test-key",
|
||||
"params": "test-params",
|
||||
"paramstype": "HeroScript",
|
||||
"timeout": 30,
|
||||
"log": true,
|
||||
},
|
||||
storeError: nil,
|
||||
enqueueError: nil,
|
||||
expectedStatus: fiber.StatusOK,
|
||||
// Expected body should match the 'createdJob' structure after Save, Store, Enqueue
|
||||
// JobID is assigned by Save(), which we are not mocking here.
|
||||
// The handler returns the job object.
|
||||
// For the test, we assume Save() works and populates JobID if it were a real DB.
|
||||
// The mock will return the job passed to it.
|
||||
expectedBody: `{"jobid":0,"circleid":"test-circle","topic":"test-topic","params":"test-params","paramstype":"HeroScript","status":"new","sessionkey":"test-key","result":"","error":"","timeout":30,"log":true,"timescheduled":0,"timestart":0,"timeend":0}`,
|
||||
},
|
||||
// Removed "Connection Error"
|
||||
{
|
||||
name: "StoreJob Error",
|
||||
reqBody: map[string]interface{}{
|
||||
"circleid": "test-circle", "topic": "test-topic", "params": "p", "paramstype": "HeroScript",
|
||||
},
|
||||
storeError: errors.New("store error"),
|
||||
enqueueError: nil,
|
||||
expectedStatus: fiber.StatusInternalServerError,
|
||||
expectedBody: `{"error":"Failed to store new job in Redis: store error"}`,
|
||||
},
|
||||
{
|
||||
name: "EnqueueJob Error",
|
||||
reqBody: map[string]interface{}{
|
||||
"circleid": "test-circle", "topic": "test-topic", "params": "p", "paramstype": "HeroScript",
|
||||
},
|
||||
storeError: nil,
|
||||
enqueueError: errors.New("enqueue error"),
|
||||
expectedStatus: fiber.StatusInternalServerError,
|
||||
expectedBody: `{"error":"Failed to enqueue new job in Redis: enqueue error"}`,
|
||||
},
|
||||
{
|
||||
name: "Empty Circle ID",
|
||||
reqBody: map[string]interface{}{
|
||||
"circleid": "", "topic": "test-topic", "params": "p", "paramstype": "HeroScript",
|
||||
},
|
||||
expectedStatus: fiber.StatusBadRequest,
|
||||
expectedBody: `{"error":"Circle ID is required"}`,
|
||||
},
|
||||
{
|
||||
name: "Empty Topic",
|
||||
reqBody: map[string]interface{}{
|
||||
"circleid": "c", "topic": "", "params": "p", "paramstype": "HeroScript",
|
||||
},
|
||||
expectedStatus: fiber.StatusBadRequest,
|
||||
expectedBody: `{"error":"Topic is required"}`,
|
||||
},
|
||||
{
|
||||
name: "Empty Params",
|
||||
reqBody: map[string]interface{}{
|
||||
"circleid": "c", "topic": "t", "params": "", "paramstype": "HeroScript",
|
||||
},
|
||||
expectedStatus: fiber.StatusBadRequest,
|
||||
expectedBody: `{"error":"Params are required"}`,
|
||||
},
|
||||
{
|
||||
name: "Empty ParamsType",
|
||||
reqBody: map[string]interface{}{
|
||||
"circleid": "c", "topic": "t", "params": "p", "paramstype": "",
|
||||
},
|
||||
expectedStatus: fiber.StatusBadRequest,
|
||||
expectedBody: `{"error":"ParamsType is required"}`,
|
||||
},
|
||||
{
|
||||
name: "Invalid ParamsType",
|
||||
reqBody: map[string]interface{}{
|
||||
"circleid": "c", "topic": "t", "params": "p", "paramstype": "InvalidType",
|
||||
},
|
||||
expectedStatus: fiber.StatusBadRequest,
|
||||
expectedBody: `{"error":"Invalid ParamsType: InvalidType"}`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
_, mockClient, app := setupTest()
|
||||
|
||||
// Setup mock expectations
|
||||
// job.Save() is called before client interactions. We assume it succeeds for these tests.
|
||||
// The mock will be called with a job object. We use mock.AnythingOfType for the job
|
||||
// because the JobID might be populated by Save() in a real scenario, making exact match hard.
|
||||
if tc.reqBody["circleid"] != "" && tc.reqBody["topic"] != "" &&
|
||||
tc.reqBody["params"] != "" && tc.reqBody["paramstype"] != "" &&
|
||||
herojobs.ParamsType(tc.reqBody["paramstype"].(string)) != "" { // Basic validation check
|
||||
|
||||
// We expect StoreJob to be called with a *herojobs.Job.
|
||||
// The actual JobID is set by job.Save() which is not mocked here.
|
||||
// So we use mock.AnythingOfType to match the argument.
|
||||
mockClient.On("StoreJob", mock.AnythingOfType("*herojobs.Job")).Return(tc.storeError).Once().Maybe()
|
||||
|
||||
if tc.storeError == nil {
|
||||
mockClient.On("EnqueueJob", mock.AnythingOfType("*herojobs.Job")).Return(tc.enqueueError).Once().Maybe()
|
||||
}
|
||||
}
|
||||
|
||||
reqBodyBytes, err := json.Marshal(tc.reqBody)
|
||||
assert.NoError(t, err)
|
||||
|
||||
req, err := createTestRequest(http.MethodPost, "/api/jobs/create", bytes.NewReader(reqBodyBytes)) // Use /api/jobs/create
|
||||
assert.NoError(t, err)
|
||||
// Content-Type is set by createTestRequest
|
||||
|
||||
// Perform the request
|
||||
resp, err := app.Test(req)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Check status code
|
||||
assert.Equal(t, tc.expectedStatus, resp.StatusCode)
|
||||
|
||||
// Check response body
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
assert.NoError(t, err)
|
||||
assert.JSONEq(t, tc.expectedBody, string(body))
|
||||
|
||||
// Verify that all expectations were met
|
||||
mockClient.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestSubmitJob tests the submitJob handler.
//
// NOTE(review): all non-nil table entries share the same *herojobs.Job
// pointer (submittedJob); the mock matches on that exact pointer.
func TestSubmitJob(t *testing.T) {
	// Test cases
	submittedJob := herojobs.NewJob()
	submittedJob.JobID = 10 // Assume Save populates this
	submittedJob.CircleID = "test-circle"
	submittedJob.Topic = "test-topic"
	submittedJob.Params = "submitted params"
	submittedJob.ParamsType = herojobs.ParamsTypeHeroScript
	submittedJob.Status = herojobs.JobStatusNew

	tests := []struct {
		name           string
		jobToSubmit    *herojobs.Job // This is the job in the request body
		storeError     error
		enqueueError   error
		expectedStatus int
		expectedBody   string // Will be the jobToSubmit marshaled (after potential Save)
	}{
		{
			name:           "Success",
			jobToSubmit:    submittedJob,
			storeError:     nil,
			enqueueError:   nil,
			expectedStatus: fiber.StatusOK,
			// The handler returns the job object from the request after Save(), Store(), Enqueue()
			// For the mock, the JobID from jobToSubmit will be used.
			expectedBody: `{"jobid":10,"circleid":"test-circle","topic":"test-topic","params":"submitted params","paramstype":"HeroScript","status":"new","sessionkey":"","result":"","error":"","timeout":60,"log":false,"timescheduled":0,"timestart":0,"timeend":0}`,
		},
		// Removed "Connection Error"
		{
			name:           "StoreJob Error",
			jobToSubmit:    submittedJob,
			storeError:     errors.New("store error"),
			enqueueError:   nil,
			expectedStatus: fiber.StatusInternalServerError,
			expectedBody:   `{"error":"Failed to store job in Redis: store error"}`,
		},
		{
			name:           "EnqueueJob Error",
			jobToSubmit:    submittedJob,
			storeError:     nil,
			enqueueError:   errors.New("enqueue error"),
			expectedStatus: fiber.StatusInternalServerError,
			expectedBody:   `{"error":"Failed to enqueue job: enqueue error"}`,
		},
		{
			name:           "Empty Job in request (parsing error)",
			jobToSubmit:    nil, // Simulates empty or malformed request body
			expectedStatus: fiber.StatusBadRequest,
			expectedBody:   `{"error":"Failed to parse job data: unexpected end of JSON input"}`, // Or similar based on actual parsing
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			_, mockClient, app := setupTest()

			// Setup mock expectations
			// job.Save() is called before client interactions.
			if tc.jobToSubmit != nil { // If job is parsable from request
				// We expect StoreJob to be called with the job from the request.
				// The JobID might be modified by Save() in a real scenario.
				mockClient.On("StoreJob", tc.jobToSubmit).Return(tc.storeError).Once().Maybe()
				if tc.storeError == nil {
					mockClient.On("EnqueueJob", tc.jobToSubmit).Return(tc.enqueueError).Once().Maybe()
				}
			}

			// When jobToSubmit is nil, reqBodyBytes stays nil; bytes.NewReader(nil)
			// below yields an empty body, which triggers the expected parse error.
			var reqBodyBytes []byte
			var err error
			if tc.jobToSubmit != nil {
				reqBodyBytes, err = json.Marshal(tc.jobToSubmit)
				assert.NoError(t, err)
			}

			req, err := createTestRequest(http.MethodPost, "/api/jobs/submit", bytes.NewReader(reqBodyBytes)) // Use /api/jobs/submit
			assert.NoError(t, err)
			// Content-Type is set by createTestRequest

			// Perform the request
			resp, err := app.Test(req)
			assert.NoError(t, err)

			// Check status code
			assert.Equal(t, tc.expectedStatus, resp.StatusCode)

			// Check response body
			body, err := io.ReadAll(resp.Body)
			assert.NoError(t, err)
			assert.JSONEq(t, tc.expectedBody, string(body))

			// Verify that all expectations were met
			mockClient.AssertExpectations(t)
		})
	}
}
|
554
_pkg2_dont_use/heroagent/handlers/log_handlers.go
Normal file
554
_pkg2_dont_use/heroagent/handlers/log_handlers.go
Normal file
@@ -0,0 +1,554 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/logger"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
// LogHandler handles log-related routes. It owns one logger per log
// category, each rooted in its own subdirectory of logBasePath.
type LogHandler struct {
	systemLogger  *logger.Logger // logs under <base>/system
	serviceLogger *logger.Logger // logs under <base>/services
	jobLogger     *logger.Logger // logs under <base>/jobs
	processLogger *logger.Logger // logs under <base>/processes
	logBasePath   string         // root directory containing the four log dirs
}
|
||||
|
||||
// NewLogHandler creates a new LogHandler
|
||||
func NewLogHandler(logPath string) (*LogHandler, error) {
|
||||
// Create base directories for different log types
|
||||
systemLogPath := filepath.Join(logPath, "system")
|
||||
serviceLogPath := filepath.Join(logPath, "services")
|
||||
jobLogPath := filepath.Join(logPath, "jobs")
|
||||
processLogPath := filepath.Join(logPath, "processes")
|
||||
|
||||
// Create logger instances for each type
|
||||
systemLogger, err := logger.New(systemLogPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create system logger: %w", err)
|
||||
}
|
||||
|
||||
serviceLogger, err := logger.New(serviceLogPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create service logger: %w", err)
|
||||
}
|
||||
|
||||
jobLogger, err := logger.New(jobLogPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create job logger: %w", err)
|
||||
}
|
||||
|
||||
processLogger, err := logger.New(processLogPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create process logger: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Log handler created successfully with paths:\n System: %s\n Services: %s\n Jobs: %s\n Processes: %s\n",
|
||||
systemLogPath, serviceLogPath, jobLogPath, processLogPath)
|
||||
|
||||
return &LogHandler{
|
||||
systemLogger: systemLogger,
|
||||
serviceLogger: serviceLogger,
|
||||
jobLogger: jobLogger,
|
||||
processLogger: processLogger,
|
||||
logBasePath: logPath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// LogType represents the type of logs to retrieve. It selects which of the
// handler's loggers a request is routed to.
type LogType string

const (
	LogTypeSystem  LogType = "system"  // system logger (the default)
	LogTypeService LogType = "service" // service logger
	LogTypeJob     LogType = "job"     // job logger
	LogTypeProcess LogType = "process" // process logger
	LogTypeAll     LogType = "all"     // Special type to retrieve logs from all sources
)
|
||||
|
||||
// GetLogs renders the logs page with logs content
|
||||
func (h *LogHandler) GetLogs(c *fiber.Ctx) error {
|
||||
// Check which logger to use based on the log type parameter
|
||||
logTypeParam := c.Query("log_type", string(LogTypeSystem))
|
||||
|
||||
// Parse query parameters
|
||||
category := c.Query("category", "")
|
||||
logItemType := parseLogType(c.Query("type", ""))
|
||||
maxItems := c.QueryInt("max_items", 100)
|
||||
page := c.QueryInt("page", 1)
|
||||
itemsPerPage := 20 // Default items per page
|
||||
|
||||
// Parse time range
|
||||
fromTime := parseTimeParam(c.Query("from", ""))
|
||||
toTime := parseTimeParam(c.Query("to", ""))
|
||||
|
||||
// Create search arguments
|
||||
searchArgs := logger.SearchArgs{
|
||||
Category: category,
|
||||
LogType: logItemType,
|
||||
MaxItems: maxItems,
|
||||
}
|
||||
|
||||
if !fromTime.IsZero() {
|
||||
searchArgs.TimestampFrom = &fromTime
|
||||
}
|
||||
|
||||
if !toTime.IsZero() {
|
||||
searchArgs.TimestampTo = &toTime
|
||||
}
|
||||
|
||||
// Variables for logs and error
|
||||
var logs []logger.LogItem
|
||||
var err error
|
||||
var logTypeTitle string
|
||||
|
||||
// Check if we want to merge logs from all sources
|
||||
if LogType(logTypeParam) == LogTypeAll {
|
||||
// Get merged logs from all loggers
|
||||
logs, err = h.getMergedLogs(searchArgs)
|
||||
logTypeTitle = "All Logs"
|
||||
} else {
|
||||
// Select the appropriate logger based on the log type
|
||||
var selectedLogger *logger.Logger
|
||||
|
||||
switch LogType(logTypeParam) {
|
||||
case LogTypeService:
|
||||
selectedLogger = h.serviceLogger
|
||||
logTypeTitle = "Service Logs"
|
||||
case LogTypeJob:
|
||||
selectedLogger = h.jobLogger
|
||||
logTypeTitle = "Job Logs"
|
||||
case LogTypeProcess:
|
||||
selectedLogger = h.processLogger
|
||||
logTypeTitle = "Process Logs"
|
||||
default:
|
||||
selectedLogger = h.systemLogger
|
||||
logTypeTitle = "System Logs"
|
||||
}
|
||||
|
||||
// Check if the selected logger is properly initialized
|
||||
if selectedLogger == nil {
|
||||
return c.Render("admin/system/logs", fiber.Map{
|
||||
"title": logTypeTitle,
|
||||
"error": "Logger not initialized",
|
||||
"logTypes": []LogType{LogTypeAll, LogTypeSystem, LogTypeService, LogTypeJob, LogTypeProcess},
|
||||
"selectedLogType": logTypeParam,
|
||||
})
|
||||
}
|
||||
|
||||
// Search for logs using the selected logger
|
||||
logs, err = selectedLogger.Search(searchArgs)
|
||||
}
|
||||
|
||||
// Handle search error
|
||||
if err != nil {
|
||||
return c.Render("admin/system/logs", fiber.Map{
|
||||
"title": logTypeTitle,
|
||||
"error": err.Error(),
|
||||
"logTypes": []LogType{LogTypeAll, LogTypeSystem, LogTypeService, LogTypeJob, LogTypeProcess},
|
||||
"selectedLogType": logTypeParam,
|
||||
})
|
||||
}
|
||||
|
||||
// Calculate total pages
|
||||
totalLogs := len(logs)
|
||||
totalPages := (totalLogs + itemsPerPage - 1) / itemsPerPage
|
||||
|
||||
// Apply pagination
|
||||
startIndex := (page - 1) * itemsPerPage
|
||||
endIndex := startIndex + itemsPerPage
|
||||
if endIndex > totalLogs {
|
||||
endIndex = totalLogs
|
||||
}
|
||||
|
||||
// Slice logs for current page
|
||||
pagedLogs := logs
|
||||
if startIndex < totalLogs {
|
||||
pagedLogs = logs[startIndex:endIndex]
|
||||
} else {
|
||||
pagedLogs = []logger.LogItem{}
|
||||
}
|
||||
|
||||
// Convert logs to a format suitable for the UI
|
||||
formattedLogs := make([]fiber.Map, 0, len(pagedLogs))
|
||||
for _, log := range pagedLogs {
|
||||
logTypeStr := "INFO"
|
||||
logTypeClass := "log-info"
|
||||
if log.LogType == logger.LogTypeError {
|
||||
logTypeStr = "ERROR"
|
||||
logTypeClass = "log-error"
|
||||
}
|
||||
|
||||
formattedLogs = append(formattedLogs, fiber.Map{
|
||||
"timestamp": log.Timestamp.Format("2006-01-02T15:04:05"),
|
||||
"category": log.Category,
|
||||
"message": log.Message,
|
||||
"type": logTypeStr,
|
||||
"typeClass": logTypeClass,
|
||||
})
|
||||
}
|
||||
|
||||
return c.Render("admin/system/logs", fiber.Map{
|
||||
"title": logTypeTitle,
|
||||
"logTypes": []LogType{LogTypeAll, LogTypeSystem, LogTypeService, LogTypeJob, LogTypeProcess},
|
||||
"selectedLogType": logTypeParam,
|
||||
"logs": formattedLogs,
|
||||
"total": totalLogs,
|
||||
"showing": len(formattedLogs),
|
||||
"page": page,
|
||||
"totalPages": totalPages,
|
||||
"categoryParam": category,
|
||||
"typeParam": c.Query("type", ""),
|
||||
"fromParam": c.Query("from", ""),
|
||||
"toParam": c.Query("to", ""),
|
||||
})
|
||||
}
|
||||
|
||||
// GetLogsAPI returns logs in JSON format for API consumption.
//
// Unlike GetLogs, this endpoint does not paginate: it returns up to
// max_items entries (default 100) as {"logs": [...], "total": n}.
// Query parameters mirror GetLogs: log_type, category, type, max_items,
// from, to (RFC3339 timestamps).
func (h *LogHandler) GetLogsAPI(c *fiber.Ctx) error {
	// Check which logger to use based on the log type parameter
	logTypeParam := c.Query("log_type", string(LogTypeSystem))

	// Parse query parameters
	category := c.Query("category", "")
	logItemType := parseLogType(c.Query("type", ""))
	maxItems := c.QueryInt("max_items", 100)

	// Parse time range
	fromTime := parseTimeParam(c.Query("from", ""))
	toTime := parseTimeParam(c.Query("to", ""))

	// Create search arguments
	searchArgs := logger.SearchArgs{
		Category: category,
		LogType:  logItemType,
		MaxItems: maxItems,
	}

	if !fromTime.IsZero() {
		searchArgs.TimestampFrom = &fromTime
	}

	if !toTime.IsZero() {
		searchArgs.TimestampTo = &toTime
	}

	// Variables for logs and error
	var logs []logger.LogItem
	var err error

	// Check if we want to merge logs from all sources
	if LogType(logTypeParam) == LogTypeAll {
		// Get merged logs from all loggers
		logs, err = h.getMergedLogs(searchArgs)
	} else {
		// Select the appropriate logger based on the log type
		var selectedLogger *logger.Logger

		switch LogType(logTypeParam) {
		case LogTypeService:
			selectedLogger = h.serviceLogger
		case LogTypeJob:
			selectedLogger = h.jobLogger
		case LogTypeProcess:
			selectedLogger = h.processLogger
		default:
			selectedLogger = h.systemLogger
		}

		// Check if the selected logger is properly initialized
		if selectedLogger == nil {
			return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
				"error": "Logger not initialized",
			})
		}

		// Search for logs using the selected logger
		logs, err = selectedLogger.Search(searchArgs)
	}

	// Handle search error
	if err != nil {
		return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
			"error": err.Error(),
		})
	}

	// Convert logs to a format suitable for the UI
	response := make([]fiber.Map, 0, len(logs))
	for _, log := range logs {
		logTypeStr := "INFO"
		if log.LogType == logger.LogTypeError {
			logTypeStr = "ERROR"
		}

		response = append(response, fiber.Map{
			"timestamp": log.Timestamp.Format(time.RFC3339),
			"category":  log.Category,
			"message":   log.Message,
			"type":      logTypeStr,
		})
	}

	return c.JSON(fiber.Map{
		"logs":  response,
		"total": len(logs),
	})
}
|
||||
|
||||
// GetLogsFragment returns logs in HTML format for Unpoly partial updates
|
||||
func (h *LogHandler) GetLogsFragment(c *fiber.Ctx) error {
|
||||
// This is a fragment template for Unpoly updates
|
||||
|
||||
// Check which logger to use based on the log type parameter
|
||||
logTypeParam := c.Query("log_type", string(LogTypeSystem))
|
||||
|
||||
// Parse query parameters
|
||||
category := c.Query("category", "")
|
||||
logItemType := parseLogType(c.Query("type", ""))
|
||||
maxItems := c.QueryInt("max_items", 100)
|
||||
page := c.QueryInt("page", 1)
|
||||
itemsPerPage := 20 // Default items per page
|
||||
|
||||
// Parse time range
|
||||
fromTime := parseTimeParam(c.Query("from", ""))
|
||||
toTime := parseTimeParam(c.Query("to", ""))
|
||||
|
||||
// Create search arguments
|
||||
searchArgs := logger.SearchArgs{
|
||||
Category: category,
|
||||
LogType: logItemType,
|
||||
MaxItems: maxItems,
|
||||
}
|
||||
|
||||
if !fromTime.IsZero() {
|
||||
searchArgs.TimestampFrom = &fromTime
|
||||
}
|
||||
|
||||
if !toTime.IsZero() {
|
||||
searchArgs.TimestampTo = &toTime
|
||||
}
|
||||
|
||||
// Variables for logs and error
|
||||
var logs []logger.LogItem
|
||||
var err error
|
||||
var logTypeTitle string
|
||||
|
||||
// Check if we want to merge logs from all sources
|
||||
if LogType(logTypeParam) == LogTypeAll {
|
||||
// Get merged logs from all loggers
|
||||
logs, err = h.getMergedLogs(searchArgs)
|
||||
logTypeTitle = "All Logs"
|
||||
} else {
|
||||
// Select the appropriate logger based on the log type
|
||||
var selectedLogger *logger.Logger
|
||||
|
||||
switch LogType(logTypeParam) {
|
||||
case LogTypeService:
|
||||
selectedLogger = h.serviceLogger
|
||||
logTypeTitle = "Service Logs"
|
||||
case LogTypeJob:
|
||||
selectedLogger = h.jobLogger
|
||||
logTypeTitle = "Job Logs"
|
||||
case LogTypeProcess:
|
||||
selectedLogger = h.processLogger
|
||||
logTypeTitle = "Process Logs"
|
||||
default:
|
||||
selectedLogger = h.systemLogger
|
||||
logTypeTitle = "System Logs"
|
||||
}
|
||||
|
||||
// Check if the selected logger is properly initialized
|
||||
if selectedLogger == nil {
|
||||
return c.Render("admin/system/logs_fragment", fiber.Map{
|
||||
"title": logTypeTitle,
|
||||
"error": "Logger not initialized",
|
||||
"logTypes": []LogType{LogTypeAll, LogTypeSystem, LogTypeService, LogTypeJob, LogTypeProcess},
|
||||
"selectedLogType": logTypeParam,
|
||||
})
|
||||
}
|
||||
|
||||
// Search for logs using the selected logger
|
||||
logs, err = selectedLogger.Search(searchArgs)
|
||||
}
|
||||
|
||||
// Handle search error
|
||||
if err != nil {
|
||||
return c.Render("admin/system/logs_fragment", fiber.Map{
|
||||
"title": logTypeTitle,
|
||||
"error": err.Error(),
|
||||
"logTypes": []LogType{LogTypeAll, LogTypeSystem, LogTypeService, LogTypeJob, LogTypeProcess},
|
||||
"selectedLogType": logTypeParam,
|
||||
})
|
||||
}
|
||||
|
||||
// Calculate total pages
|
||||
totalLogs := len(logs)
|
||||
totalPages := (totalLogs + itemsPerPage - 1) / itemsPerPage
|
||||
|
||||
// Apply pagination
|
||||
startIndex := (page - 1) * itemsPerPage
|
||||
endIndex := startIndex + itemsPerPage
|
||||
if endIndex > totalLogs {
|
||||
endIndex = totalLogs
|
||||
}
|
||||
|
||||
// Slice logs for current page
|
||||
pagedLogs := logs
|
||||
if startIndex < totalLogs {
|
||||
pagedLogs = logs[startIndex:endIndex]
|
||||
} else {
|
||||
pagedLogs = []logger.LogItem{}
|
||||
}
|
||||
|
||||
// Convert logs to a format suitable for the UI
|
||||
formattedLogs := make([]fiber.Map, 0, len(pagedLogs))
|
||||
for _, log := range pagedLogs {
|
||||
logTypeStr := "INFO"
|
||||
logTypeClass := "log-info"
|
||||
if log.LogType == logger.LogTypeError {
|
||||
logTypeStr = "ERROR"
|
||||
logTypeClass = "log-error"
|
||||
}
|
||||
|
||||
formattedLogs = append(formattedLogs, fiber.Map{
|
||||
"timestamp": log.Timestamp.Format("2006-01-02T15:04:05"),
|
||||
"category": log.Category,
|
||||
"message": log.Message,
|
||||
"type": logTypeStr,
|
||||
"typeClass": logTypeClass,
|
||||
})
|
||||
}
|
||||
|
||||
// Set layout to empty to disable the layout for fragment responses
|
||||
return c.Render("admin/system/logs_fragment", fiber.Map{
|
||||
"title": logTypeTitle,
|
||||
"logTypes": []LogType{LogTypeAll, LogTypeSystem, LogTypeService, LogTypeJob, LogTypeProcess},
|
||||
"selectedLogType": logTypeParam,
|
||||
"logs": formattedLogs,
|
||||
"total": totalLogs,
|
||||
"showing": len(formattedLogs),
|
||||
"page": page,
|
||||
"totalPages": totalPages,
|
||||
"layout": "", // Disable layout for partial template
|
||||
})
|
||||
}
|
||||
|
||||
// Helper functions
|
||||
|
||||
// parseLogType converts a string log type to the appropriate LogType enum
|
||||
func parseLogType(logTypeStr string) logger.LogType {
|
||||
switch logTypeStr {
|
||||
case "error":
|
||||
return logger.LogTypeError
|
||||
default:
|
||||
return logger.LogTypeStdout
|
||||
}
|
||||
}
|
||||
|
||||
// parseTimeParam parses a time string in ISO format
|
||||
func parseTimeParam(timeStr string) time.Time {
|
||||
if timeStr == "" {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
t, err := time.Parse(time.RFC3339, timeStr)
|
||||
if err != nil {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
// getMergedLogs retrieves and merges logs from all available loggers
|
||||
func (h *LogHandler) getMergedLogs(args logger.SearchArgs) ([]logger.LogItem, error) {
|
||||
// Create a slice to hold all logs
|
||||
allLogs := make([]logger.LogItem, 0)
|
||||
|
||||
// Create a map to track errors
|
||||
errors := make(map[string]error)
|
||||
|
||||
// Get logs from system logger if available
|
||||
if h.systemLogger != nil {
|
||||
systemLogs, err := h.systemLogger.Search(args)
|
||||
if err != nil {
|
||||
errors["system"] = err
|
||||
} else {
|
||||
// Add source information to each log item
|
||||
for i := range systemLogs {
|
||||
systemLogs[i].Category = fmt.Sprintf("system:%s", systemLogs[i].Category)
|
||||
}
|
||||
allLogs = append(allLogs, systemLogs...)
|
||||
}
|
||||
}
|
||||
|
||||
// Get logs from service logger if available
|
||||
if h.serviceLogger != nil {
|
||||
serviceLogs, err := h.serviceLogger.Search(args)
|
||||
if err != nil {
|
||||
errors["service"] = err
|
||||
} else {
|
||||
// Add source information to each log item
|
||||
for i := range serviceLogs {
|
||||
serviceLogs[i].Category = fmt.Sprintf("service:%s", serviceLogs[i].Category)
|
||||
}
|
||||
allLogs = append(allLogs, serviceLogs...)
|
||||
}
|
||||
}
|
||||
|
||||
// Get logs from job logger if available
|
||||
if h.jobLogger != nil {
|
||||
jobLogs, err := h.jobLogger.Search(args)
|
||||
if err != nil {
|
||||
errors["job"] = err
|
||||
} else {
|
||||
// Add source information to each log item
|
||||
for i := range jobLogs {
|
||||
jobLogs[i].Category = fmt.Sprintf("job:%s", jobLogs[i].Category)
|
||||
}
|
||||
allLogs = append(allLogs, jobLogs...)
|
||||
}
|
||||
}
|
||||
|
||||
// Get logs from process logger if available
|
||||
if h.processLogger != nil {
|
||||
processLogs, err := h.processLogger.Search(args)
|
||||
if err != nil {
|
||||
errors["process"] = err
|
||||
} else {
|
||||
// Add source information to each log item
|
||||
for i := range processLogs {
|
||||
processLogs[i].Category = fmt.Sprintf("process:%s", processLogs[i].Category)
|
||||
}
|
||||
allLogs = append(allLogs, processLogs...)
|
||||
}
|
||||
}
|
||||
|
||||
// Check if we have any logs
|
||||
if len(allLogs) == 0 && len(errors) > 0 {
|
||||
// Combine error messages
|
||||
errorMsgs := make([]string, 0, len(errors))
|
||||
for source, err := range errors {
|
||||
errorMsgs = append(errorMsgs, fmt.Sprintf("%s: %s", source, err.Error()))
|
||||
}
|
||||
return nil, fmt.Errorf("failed to retrieve logs: %s", strings.Join(errorMsgs, "; "))
|
||||
}
|
||||
|
||||
// Sort logs by timestamp (newest first)
|
||||
sort.Slice(allLogs, func(i, j int) bool {
|
||||
return allLogs[i].Timestamp.After(allLogs[j].Timestamp)
|
||||
})
|
||||
|
||||
// Apply max items limit if specified
|
||||
if args.MaxItems > 0 && len(allLogs) > args.MaxItems {
|
||||
allLogs = allLogs[:args.MaxItems]
|
||||
}
|
||||
|
||||
return allLogs, nil
|
||||
}
|
203
_pkg2_dont_use/heroagent/handlers/process_handlers.go
Normal file
203
_pkg2_dont_use/heroagent/handlers/process_handlers.go
Normal file
@@ -0,0 +1,203 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/system/stats"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
// ProcessHandler handles process-related routes.
type ProcessHandler struct {
	// statsManager supplies cached and fresh process statistics. It may be
	// nil; handler methods check for that before use.
	statsManager *stats.StatsManager
}
|
||||
|
||||
// NewProcessHandler creates a new ProcessHandler
|
||||
func NewProcessHandler(statsManager *stats.StatsManager) *ProcessHandler {
|
||||
return &ProcessHandler{
|
||||
statsManager: statsManager,
|
||||
}
|
||||
}
|
||||
|
||||
// GetProcessStatsJSON returns process stats in JSON format for API consumption
|
||||
func (h *ProcessHandler) GetProcessStatsJSON(c *fiber.Ctx) error {
|
||||
// Check if StatsManager is properly initialized
|
||||
if h.statsManager == nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": "System error: Stats manager not initialized",
|
||||
})
|
||||
}
|
||||
|
||||
// Get process data from the StatsManager
|
||||
processData, err := h.statsManager.GetProcessStatsFresh(100) // Limit to 100 processes
|
||||
if err != nil {
|
||||
// Try getting cached data as fallback
|
||||
processData, err = h.statsManager.GetProcessStats(100)
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": "Failed to get process data: " + err.Error(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Convert to fiber.Map for JSON response
|
||||
response := fiber.Map{
|
||||
"total": processData.Total,
|
||||
"filtered": processData.Filtered,
|
||||
"timestamp": time.Now().Unix(),
|
||||
}
|
||||
|
||||
// Convert processes to a slice of maps
|
||||
processes := make([]fiber.Map, len(processData.Processes))
|
||||
for i, proc := range processData.Processes {
|
||||
processes[i] = fiber.Map{
|
||||
"pid": proc.PID,
|
||||
"name": proc.Name,
|
||||
"status": proc.Status,
|
||||
"cpu_percent": proc.CPUPercent,
|
||||
"memory_mb": proc.MemoryMB,
|
||||
"create_time_str": proc.CreateTime,
|
||||
"is_current": proc.IsCurrent,
|
||||
}
|
||||
}
|
||||
|
||||
response["processes"] = processes
|
||||
|
||||
// Return JSON response
|
||||
return c.JSON(response)
|
||||
}
|
||||
|
||||
// GetProcesses renders the processes page with initial process data.
//
// Flow: verify the stats manager exists, force a cache refresh, fetch fresh
// stats (falling back to cached stats on failure), then render the full
// page. Errors are surfaced inside the rendered page rather than as HTTP
// error responses.
func (h *ProcessHandler) GetProcesses(c *fiber.Ctx) error {
	// Check if StatsManager is properly initialized
	if h.statsManager == nil {
		return c.Render("admin/system/processes", fiber.Map{
			"processes": []fiber.Map{},
			"error":     "System error: Stats manager not initialized",
			"warning":   "The process manager is not properly initialized.",
		})
	}

	// Force cache refresh for process stats
	h.statsManager.ForceUpdate("process")

	// Get process data from the StatsManager (0 = all processes, fresh data)
	processData, err := h.statsManager.GetProcessStatsFresh(0)
	if err != nil {
		// Try getting cached data as fallback
		processData, err = h.statsManager.GetProcessStats(0)
		if err != nil {
			// If there's an error, still render the page but with empty data
			return c.Render("admin/system/processes", fiber.Map{
				"processes": []fiber.Map{},
				"error":     "Failed to load process data: " + err.Error(),
				"warning":   "System attempted both fresh and cached data retrieval but failed.",
			})
		}
	}

	// Convert to []fiber.Map for template rendering. The *_str fields are
	// pre-formatted for direct display in the template.
	processStats := make([]fiber.Map, len(processData.Processes))
	for i, proc := range processData.Processes {
		processStats[i] = fiber.Map{
			"pid":             proc.PID,
			"name":            proc.Name,
			"status":          proc.Status,
			"cpu_percent":     proc.CPUPercent,
			"memory_mb":       proc.MemoryMB,
			"create_time_str": proc.CreateTime,
			"is_current":      proc.IsCurrent,
			"cpu_percent_str": fmt.Sprintf("%.1f%%", proc.CPUPercent),
			"memory_mb_str":   fmt.Sprintf("%.1f MB", proc.MemoryMB),
		}
	}

	// Render the full page with initial process data
	return c.Render("admin/system/processes", fiber.Map{
		"processes": processStats,
	})
}
|
||||
|
||||
// GetProcessesData returns the HTML fragment for processes data
|
||||
func (h *ProcessHandler) GetProcessesData(c *fiber.Ctx) error {
|
||||
// Check if this is a manual refresh request (with X-Requested-With header set)
|
||||
isManualRefresh := c.Get("X-Requested-With") == "XMLHttpRequest"
|
||||
|
||||
// Check if StatsManager is properly initialized
|
||||
if h.statsManager == nil {
|
||||
return c.Render("admin/system/processes_data", fiber.Map{
|
||||
"error": "System error: Stats manager not initialized",
|
||||
"layout": "",
|
||||
})
|
||||
}
|
||||
|
||||
// For manual refresh, always get fresh data by forcing cache invalidation
|
||||
var processData *stats.ProcessStats
|
||||
var err error
|
||||
|
||||
// Force cache refresh for process stats on manual refresh
|
||||
if isManualRefresh {
|
||||
h.statsManager.ForceUpdate("process")
|
||||
}
|
||||
|
||||
if isManualRefresh {
|
||||
// Force bypass cache for manual refresh by using fresh data
|
||||
processData, err = h.statsManager.GetProcessStatsFresh(0)
|
||||
} else {
|
||||
// Use cached data for auto-polling
|
||||
processData, err = h.statsManager.GetProcessStats(0)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// Try alternative method if the primary method fails
|
||||
if isManualRefresh {
|
||||
processData, err = h.statsManager.GetProcessStats(0)
|
||||
} else {
|
||||
processData, err = h.statsManager.GetProcessStatsFresh(0)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// Handle AJAX requests differently from regular requests
|
||||
isAjax := c.Get("X-Requested-With") == "XMLHttpRequest"
|
||||
if isAjax {
|
||||
return c.Status(fiber.StatusInternalServerError).SendString("Failed to get process data: " + err.Error())
|
||||
}
|
||||
// For regular requests, render the error within the fragment
|
||||
return c.Render("admin/system/processes_data", fiber.Map{
|
||||
"error": "Failed to get process data: " + err.Error(),
|
||||
"layout": "",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Convert to []fiber.Map for template rendering
|
||||
processStats := make([]fiber.Map, len(processData.Processes))
|
||||
for i, proc := range processData.Processes {
|
||||
processStats[i] = fiber.Map{
|
||||
"pid": proc.PID,
|
||||
"name": proc.Name,
|
||||
"status": proc.Status,
|
||||
"cpu_percent": proc.CPUPercent,
|
||||
"memory_mb": proc.MemoryMB,
|
||||
"create_time_str": proc.CreateTime,
|
||||
"is_current": proc.IsCurrent,
|
||||
"cpu_percent_str": fmt.Sprintf("%.1f%%", proc.CPUPercent),
|
||||
"memory_mb_str": fmt.Sprintf("%.1f MB", proc.MemoryMB),
|
||||
}
|
||||
}
|
||||
|
||||
// Create a boolean to indicate if we have processes
|
||||
hasProcesses := len(processStats) > 0
|
||||
|
||||
// Create template data with fiber.Map
|
||||
templateData := fiber.Map{
|
||||
"hasProcesses": hasProcesses,
|
||||
"processCount": len(processStats),
|
||||
"processStats": processStats,
|
||||
"layout": "", // Disable layout for partial template
|
||||
}
|
||||
|
||||
// Return only the table HTML content directly to be injected into the processes-table-content div
|
||||
return c.Render("admin/system/processes_data", templateData)
|
||||
}
|
266
_pkg2_dont_use/heroagent/handlers/service_handlers.go
Normal file
266
_pkg2_dont_use/heroagent/handlers/service_handlers.go
Normal file
@@ -0,0 +1,266 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/processmanager/interfaces"
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/processmanager/interfaces/openrpc"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
// ServiceHandler handles service-related routes.
type ServiceHandler struct {
	// client talks to the process manager over its OpenRPC socket API.
	client *openrpc.Client
}
|
||||
|
||||
// NewServiceHandler creates a new ServiceHandler
|
||||
func NewServiceHandler(socketPath, secret string) *ServiceHandler {
|
||||
fmt.Printf("DEBUG: Creating new ServiceHandler with socket path: %s and secret: %s\n", socketPath, secret)
|
||||
return &ServiceHandler{
|
||||
client: openrpc.NewClient(socketPath, secret),
|
||||
}
|
||||
}
|
||||
|
||||
// GetServices renders the services page
|
||||
func (h *ServiceHandler) GetServices(c *fiber.Ctx) error {
|
||||
return c.Render("admin/services", fiber.Map{
|
||||
"title": "Services",
|
||||
"error": c.Query("error", ""),
|
||||
"warning": c.Query("warning", ""),
|
||||
})
|
||||
}
|
||||
|
||||
// GetServicesFragment returns the services table fragment for Unpoly updates
|
||||
func (h *ServiceHandler) GetServicesFragment(c *fiber.Ctx) error {
|
||||
processes, err := h.getProcessList()
|
||||
if err != nil {
|
||||
return c.Render("admin/services_fragment", fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to fetch services: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
return c.Render("admin/services_fragment", fiber.Map{
|
||||
"processes": processes,
|
||||
})
|
||||
}
|
||||
|
||||
// StartService handles the request to start a new service
|
||||
func (h *ServiceHandler) StartService(c *fiber.Ctx) error {
|
||||
name := c.FormValue("name")
|
||||
command := c.FormValue("command")
|
||||
|
||||
if name == "" || command == "" {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": "Service name and command are required",
|
||||
})
|
||||
}
|
||||
|
||||
// Default to enabling logs
|
||||
logEnabled := true
|
||||
|
||||
// Start the process with no deadline, no cron, and no job ID
|
||||
fmt.Printf("DEBUG: StartService called for '%s' using client: %p\n", name, h.client)
|
||||
result, err := h.client.StartProcess(name, command, logEnabled, 0, "", "")
|
||||
if err != nil {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to start service: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
if !result.Success {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": result.Message,
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"message": result.Message,
|
||||
"pid": result.PID,
|
||||
})
|
||||
}
|
||||
|
||||
// StopService handles the request to stop a service
|
||||
func (h *ServiceHandler) StopService(c *fiber.Ctx) error {
|
||||
name := c.FormValue("name")
|
||||
|
||||
if name == "" {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": "Service name is required",
|
||||
})
|
||||
}
|
||||
|
||||
result, err := h.client.StopProcess(name)
|
||||
if err != nil {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to stop service: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
if !result.Success {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": result.Message,
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"message": result.Message,
|
||||
})
|
||||
}
|
||||
|
||||
// RestartService handles the request to restart a service
|
||||
func (h *ServiceHandler) RestartService(c *fiber.Ctx) error {
|
||||
name := c.FormValue("name")
|
||||
|
||||
if name == "" {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": "Service name is required",
|
||||
})
|
||||
}
|
||||
|
||||
result, err := h.client.RestartProcess(name)
|
||||
if err != nil {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to restart service: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
if !result.Success {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": result.Message,
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"message": result.Message,
|
||||
"pid": result.PID,
|
||||
})
|
||||
}
|
||||
|
||||
// DeleteService handles the request to delete a service
|
||||
func (h *ServiceHandler) DeleteService(c *fiber.Ctx) error {
|
||||
name := c.FormValue("name")
|
||||
|
||||
if name == "" {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": "Service name is required",
|
||||
})
|
||||
}
|
||||
|
||||
result, err := h.client.DeleteProcess(name)
|
||||
if err != nil {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to delete service: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
if !result.Success {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": result.Message,
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"message": result.Message,
|
||||
})
|
||||
}
|
||||
|
||||
// GetServiceLogs handles the request to get logs for a service.
// Query parameters: "name" (required) and "lines" (optional, default 100).
// Responds with JSON; errors are reported in the "error" field.
func (h *ServiceHandler) GetServiceLogs(c *fiber.Ctx) error {
	name := c.Query("name")
	lines := c.QueryInt("lines", 100)

	fmt.Printf("DEBUG: GetServiceLogs called for service '%s' using client: %p\n", name, h.client)

	if name == "" {
		return c.JSON(fiber.Map{
			"error": "Service name is required",
		})
	}

	// Debug: List all processes before getting logs
	processes, listErr := h.getProcessList()
	if listErr == nil {
		fmt.Println("DEBUG: Current processes in service handler:")
		for _, proc := range processes {
			fmt.Printf("DEBUG: - '%v' (PID: %v, Status: %v)\n", proc["Name"], proc["ID"], proc["Status"])
		}
	} else {
		fmt.Printf("DEBUG: Error listing processes: %v\n", listErr)
	}

	result, err := h.client.GetProcessLogs(name, lines)
	if err != nil {
		return c.JSON(fiber.Map{
			"error": fmt.Sprintf("Failed to get service logs: %v", err),
		})
	}

	if !result.Success {
		return c.JSON(fiber.Map{
			"error": result.Message,
		})
	}

	return c.JSON(fiber.Map{
		"success": true,
		"logs":    result.Logs,
	})
}
|
||||
|
||||
// Helper function to get the list of processes and format them for the UI
|
||||
func (h *ServiceHandler) getProcessList() ([]fiber.Map, error) {
|
||||
// Get the list of processes
|
||||
result, err := h.client.ListProcesses("json")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list processes: %v", err)
|
||||
}
|
||||
|
||||
// Convert the result to a slice of ProcessStatus
|
||||
processList, ok := result.([]interfaces.ProcessStatus)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected result type from ListProcesses")
|
||||
}
|
||||
|
||||
// Format the processes for the UI
|
||||
formattedProcesses := make([]fiber.Map, 0, len(processList))
|
||||
for _, proc := range processList {
|
||||
// Calculate uptime
|
||||
uptime := "N/A"
|
||||
if proc.Status == "running" {
|
||||
duration := time.Since(proc.StartTime)
|
||||
if duration.Hours() >= 24 {
|
||||
days := int(duration.Hours() / 24)
|
||||
hours := int(duration.Hours()) % 24
|
||||
uptime = fmt.Sprintf("%dd %dh", days, hours)
|
||||
} else if duration.Hours() >= 1 {
|
||||
hours := int(duration.Hours())
|
||||
minutes := int(duration.Minutes()) % 60
|
||||
uptime = fmt.Sprintf("%dh %dm", hours, minutes)
|
||||
} else {
|
||||
minutes := int(duration.Minutes())
|
||||
seconds := int(duration.Seconds()) % 60
|
||||
uptime = fmt.Sprintf("%dm %ds", minutes, seconds)
|
||||
}
|
||||
}
|
||||
|
||||
// Format CPU and memory usage
|
||||
cpuUsage := fmt.Sprintf("%.1f%%", proc.CPUPercent)
|
||||
memoryUsage := fmt.Sprintf("%.1f MB", proc.MemoryMB)
|
||||
|
||||
formattedProcesses = append(formattedProcesses, fiber.Map{
|
||||
"Name": proc.Name,
|
||||
"Status": string(proc.Status),
|
||||
"ID": proc.PID,
|
||||
"CPU": cpuUsage,
|
||||
"Memory": memoryUsage,
|
||||
"Uptime": uptime,
|
||||
})
|
||||
}
|
||||
|
||||
return formattedProcesses, nil
|
||||
}
|
375
_pkg2_dont_use/heroagent/handlers/system_handlers.go
Normal file
375
_pkg2_dont_use/heroagent/handlers/system_handlers.go
Normal file
@@ -0,0 +1,375 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/system/stats"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/shirou/gopsutil/v3/host"
|
||||
)
|
||||
|
||||
// UptimeProvider defines an interface for getting system uptime.
type UptimeProvider interface {
	// GetUptime returns the system uptime as a pre-formatted string.
	GetUptime() string
}
|
||||
|
||||
// SystemHandler handles system-related page routes.
type SystemHandler struct {
	// uptimeProvider reports system uptime; may be nil (checked before use).
	uptimeProvider UptimeProvider
	// statsManager supplies hardware/process statistics; may be nil, in
	// which case handlers fall back to the package-level stats functions.
	statsManager *stats.StatsManager
}
|
||||
|
||||
// NewSystemHandler creates a new SystemHandler
|
||||
func NewSystemHandler(uptimeProvider UptimeProvider, statsManager *stats.StatsManager) *SystemHandler {
|
||||
// If statsManager is nil, create a new one with default settings
|
||||
if statsManager == nil {
|
||||
var err error
|
||||
statsManager, err = stats.NewStatsManagerWithDefaults()
|
||||
if err != nil {
|
||||
// Log the error but continue with nil statsManager
|
||||
fmt.Printf("Error creating StatsManager: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
return &SystemHandler{
|
||||
uptimeProvider: uptimeProvider,
|
||||
statsManager: statsManager,
|
||||
}
|
||||
}
|
||||
|
||||
// GetSystemInfo renders the system info page
|
||||
func (h *SystemHandler) GetSystemInfo(c *fiber.Ctx) error {
|
||||
// Initialize default values
|
||||
cpuInfo := "Unknown"
|
||||
memoryInfo := "Unknown"
|
||||
diskInfo := "Unknown"
|
||||
networkInfo := "Unknown"
|
||||
osInfo := "Unknown"
|
||||
uptimeInfo := "Unknown"
|
||||
|
||||
// Get hardware stats from the StatsManager
|
||||
var hardwareStats map[string]interface{}
|
||||
if h.statsManager != nil {
|
||||
hardwareStats = h.statsManager.GetHardwareStats()
|
||||
} else {
|
||||
// Fallback to direct function call if StatsManager is not available
|
||||
hardwareStats = stats.GetHardwareStats()
|
||||
}
|
||||
|
||||
// Extract the formatted strings - safely handle different return types
|
||||
if cpuVal, ok := hardwareStats["cpu"]; ok {
|
||||
switch v := cpuVal.(type) {
|
||||
case string:
|
||||
cpuInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
if model, ok := v["model"].(string); ok {
|
||||
usage := 0.0
|
||||
if usagePercent, ok := v["usage_percent"].(float64); ok {
|
||||
usage = usagePercent
|
||||
}
|
||||
cpuInfo = fmt.Sprintf("%s (Usage: %.1f%%)", model, usage)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if memVal, ok := hardwareStats["memory"]; ok {
|
||||
switch v := memVal.(type) {
|
||||
case string:
|
||||
memoryInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
total, used := 0.0, 0.0
|
||||
if totalGB, ok := v["total_gb"].(float64); ok {
|
||||
total = totalGB
|
||||
}
|
||||
if usedGB, ok := v["used_gb"].(float64); ok {
|
||||
used = usedGB
|
||||
}
|
||||
usedPercent := 0.0
|
||||
if percent, ok := v["used_percent"].(float64); ok {
|
||||
usedPercent = percent
|
||||
}
|
||||
memoryInfo = fmt.Sprintf("%.1f GB / %.1f GB (%.1f%% used)", used, total, usedPercent)
|
||||
}
|
||||
}
|
||||
|
||||
if diskVal, ok := hardwareStats["disk"]; ok {
|
||||
switch v := diskVal.(type) {
|
||||
case string:
|
||||
diskInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
total, used := 0.0, 0.0
|
||||
if totalGB, ok := v["total_gb"].(float64); ok {
|
||||
total = totalGB
|
||||
}
|
||||
if usedGB, ok := v["used_gb"].(float64); ok {
|
||||
used = usedGB
|
||||
}
|
||||
usedPercent := 0.0
|
||||
if percent, ok := v["used_percent"].(float64); ok {
|
||||
usedPercent = percent
|
||||
}
|
||||
diskInfo = fmt.Sprintf("%.1f GB / %.1f GB (%.1f%% used)", used, total, usedPercent)
|
||||
}
|
||||
}
|
||||
|
||||
if netVal, ok := hardwareStats["network"]; ok {
|
||||
switch v := netVal.(type) {
|
||||
case string:
|
||||
networkInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
var interfaces []string
|
||||
if ifaces, ok := v["interfaces"].([]interface{}); ok {
|
||||
for _, iface := range ifaces {
|
||||
if ifaceMap, ok := iface.(map[string]interface{}); ok {
|
||||
name := ifaceMap["name"].(string)
|
||||
ip := ifaceMap["ip"].(string)
|
||||
interfaces = append(interfaces, fmt.Sprintf("%s: %s", name, ip))
|
||||
}
|
||||
}
|
||||
networkInfo = strings.Join(interfaces, ", ")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get OS info
|
||||
hostInfo, err := host.Info()
|
||||
if err == nil {
|
||||
osInfo = fmt.Sprintf("%s %s (%s)", hostInfo.Platform, hostInfo.PlatformVersion, hostInfo.KernelVersion)
|
||||
}
|
||||
|
||||
// Get uptime
|
||||
if h.uptimeProvider != nil {
|
||||
uptimeInfo = h.uptimeProvider.GetUptime()
|
||||
}
|
||||
|
||||
// Render the template with the system info
|
||||
return c.Render("admin/system/info", fiber.Map{
|
||||
"title": "System Information",
|
||||
"cpuInfo": cpuInfo,
|
||||
"memoryInfo": memoryInfo,
|
||||
"diskInfo": diskInfo,
|
||||
"networkInfo": networkInfo,
|
||||
"osInfo": osInfo,
|
||||
"uptimeInfo": uptimeInfo,
|
||||
})
|
||||
}
|
||||
|
||||
// GetHardwareStats returns only the hardware stats for Unpoly polling
|
||||
func (h *SystemHandler) GetHardwareStats(c *fiber.Ctx) error {
|
||||
// Initialize default values
|
||||
cpuInfo := "Unknown"
|
||||
memoryInfo := "Unknown"
|
||||
diskInfo := "Unknown"
|
||||
networkInfo := "Unknown"
|
||||
|
||||
// Get hardware stats from the StatsManager
|
||||
var hardwareStats map[string]interface{}
|
||||
if h.statsManager != nil {
|
||||
hardwareStats = h.statsManager.GetHardwareStats()
|
||||
} else {
|
||||
// Fallback to direct function call if StatsManager is not available
|
||||
hardwareStats = stats.GetHardwareStats()
|
||||
}
|
||||
|
||||
// Extract the formatted strings - safely handle different return types
|
||||
if cpuVal, ok := hardwareStats["cpu"]; ok {
|
||||
switch v := cpuVal.(type) {
|
||||
case string:
|
||||
cpuInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
if model, ok := v["model"].(string); ok {
|
||||
cpuInfo = model
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if memVal, ok := hardwareStats["memory"]; ok {
|
||||
switch v := memVal.(type) {
|
||||
case string:
|
||||
memoryInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
total, used := 0.0, 0.0
|
||||
if totalGB, ok := v["total_gb"].(float64); ok {
|
||||
total = totalGB
|
||||
}
|
||||
if usedGB, ok := v["used_gb"].(float64); ok {
|
||||
used = usedGB
|
||||
}
|
||||
memoryInfo = fmt.Sprintf("%.1f GB / %.1f GB", used, total)
|
||||
}
|
||||
}
|
||||
|
||||
if diskVal, ok := hardwareStats["disk"]; ok {
|
||||
switch v := diskVal.(type) {
|
||||
case string:
|
||||
diskInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
total, used := 0.0, 0.0
|
||||
if totalGB, ok := v["total_gb"].(float64); ok {
|
||||
total = totalGB
|
||||
}
|
||||
if usedGB, ok := v["used_gb"].(float64); ok {
|
||||
used = usedGB
|
||||
}
|
||||
diskInfo = fmt.Sprintf("%.1f GB / %.1f GB", used, total)
|
||||
}
|
||||
}
|
||||
|
||||
if netVal, ok := hardwareStats["network"]; ok {
|
||||
switch v := netVal.(type) {
|
||||
case string:
|
||||
networkInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
var interfaces []string
|
||||
if ifaces, ok := v["interfaces"].([]interface{}); ok {
|
||||
for _, iface := range ifaces {
|
||||
if ifaceMap, ok := iface.(map[string]interface{}); ok {
|
||||
name := ifaceMap["name"].(string)
|
||||
ip := ifaceMap["ip"].(string)
|
||||
interfaces = append(interfaces, fmt.Sprintf("%s: %s", name, ip))
|
||||
}
|
||||
}
|
||||
networkInfo = strings.Join(interfaces, ", ")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Format for display
|
||||
cpuUsage := "0.0%"
|
||||
memUsage := "0.0%"
|
||||
diskUsage := "0.0%"
|
||||
|
||||
// Safely extract usage percentages
|
||||
if cpuVal, ok := hardwareStats["cpu"].(map[string]interface{}); ok {
|
||||
if usagePercent, ok := cpuVal["usage_percent"].(float64); ok {
|
||||
cpuUsage = fmt.Sprintf("%.1f%%", usagePercent)
|
||||
}
|
||||
}
|
||||
|
||||
if memVal, ok := hardwareStats["memory"].(map[string]interface{}); ok {
|
||||
if usedPercent, ok := memVal["used_percent"].(float64); ok {
|
||||
memUsage = fmt.Sprintf("%.1f%%", usedPercent)
|
||||
}
|
||||
}
|
||||
|
||||
if diskVal, ok := hardwareStats["disk"].(map[string]interface{}); ok {
|
||||
if usedPercent, ok := diskVal["used_percent"].(float64); ok {
|
||||
diskUsage = fmt.Sprintf("%.1f%%", usedPercent)
|
||||
}
|
||||
}
|
||||
|
||||
// Render only the hardware stats fragment
|
||||
return c.Render("admin/system/hardware_stats_fragment", fiber.Map{
|
||||
"cpuInfo": cpuInfo,
|
||||
"memoryInfo": memoryInfo,
|
||||
"diskInfo": diskInfo,
|
||||
"networkInfo": networkInfo,
|
||||
"cpuUsage": cpuUsage,
|
||||
"memUsage": memUsage,
|
||||
"diskUsage": diskUsage,
|
||||
})
|
||||
}
|
||||
|
||||
// GetHardwareStatsAPI returns hardware stats in JSON format
|
||||
func (h *SystemHandler) GetHardwareStatsAPI(c *fiber.Ctx) error {
|
||||
// Get hardware stats from the StatsManager
|
||||
var hardwareStats map[string]interface{}
|
||||
if h.statsManager != nil {
|
||||
hardwareStats = h.statsManager.GetHardwareStats()
|
||||
} else {
|
||||
// Fallback to direct function call if StatsManager is not available
|
||||
hardwareStats = stats.GetHardwareStats()
|
||||
}
|
||||
|
||||
return c.JSON(hardwareStats)
|
||||
}
|
||||
|
||||
// GetProcessStatsAPI returns process stats in JSON format for API consumption
|
||||
func (h *SystemHandler) GetProcessStatsAPI(c *fiber.Ctx) error {
|
||||
// Check if StatsManager is properly initialized
|
||||
if h.statsManager == nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": "System error: Stats manager not initialized",
|
||||
})
|
||||
}
|
||||
|
||||
// Get process data from the StatsManager
|
||||
processData, err := h.statsManager.GetProcessStatsFresh(100) // Limit to 100 processes
|
||||
if err != nil {
|
||||
// Try getting cached data as fallback
|
||||
processData, err = h.statsManager.GetProcessStats(100)
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": "Failed to get process data: " + err.Error(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Convert to fiber.Map for JSON response
|
||||
response := fiber.Map{
|
||||
"total": processData.Total,
|
||||
"filtered": processData.Filtered,
|
||||
"timestamp": time.Now().Unix(),
|
||||
}
|
||||
|
||||
// Convert processes to a slice of maps
|
||||
processes := make([]fiber.Map, len(processData.Processes))
|
||||
for i, proc := range processData.Processes {
|
||||
processes[i] = fiber.Map{
|
||||
"pid": proc.PID,
|
||||
"name": proc.Name,
|
||||
"status": proc.Status,
|
||||
"cpu_percent": proc.CPUPercent,
|
||||
"memory_mb": proc.MemoryMB,
|
||||
"create_time_str": proc.CreateTime,
|
||||
"is_current": proc.IsCurrent,
|
||||
}
|
||||
}
|
||||
|
||||
response["processes"] = processes
|
||||
|
||||
// Return JSON response
|
||||
return c.JSON(response)
|
||||
}
|
||||
|
||||
// GetSystemLogs renders the system logs page
|
||||
func (h *SystemHandler) GetSystemLogs(c *fiber.Ctx) error {
|
||||
return c.Render("admin/system/logs", fiber.Map{
|
||||
"title": "System Logs",
|
||||
})
|
||||
}
|
||||
|
||||
// GetSystemLogsTest renders the test logs page
|
||||
func (h *SystemHandler) GetSystemLogsTest(c *fiber.Ctx) error {
|
||||
return c.Render("admin/system/logs_test", fiber.Map{
|
||||
"title": "Test Logs",
|
||||
})
|
||||
}
|
||||
|
||||
// GetSystemSettings renders the system settings page
|
||||
func (h *SystemHandler) GetSystemSettings(c *fiber.Ctx) error {
|
||||
// Get the current time
|
||||
currentTime := time.Now().Format("2006-01-02 15:04:05")
|
||||
|
||||
// Render the template with the system settings
|
||||
return c.Render("admin/system/settings", fiber.Map{
|
||||
"title": "System Settings",
|
||||
"currentTime": currentTime,
|
||||
"settings": map[string]interface{}{
|
||||
"autoUpdate": true,
|
||||
"logLevel": "info",
|
||||
"maxLogSize": "100MB",
|
||||
"backupFrequency": "Daily",
|
||||
},
|
||||
})
|
||||
}
|
541
_pkg2_dont_use/heroagent/pages/admin.go
Normal file
541
_pkg2_dont_use/heroagent/pages/admin.go
Normal file
@@ -0,0 +1,541 @@
|
||||
package pages
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/heroagent/handlers"
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/system/stats"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/shirou/gopsutil/v3/host"
|
||||
)
|
||||
|
||||
// UptimeProvider defines an interface for getting system uptime.
type UptimeProvider interface {
	// GetUptime returns the system uptime as a pre-formatted string.
	GetUptime() string
}
|
||||
|
||||
// AdminHandler handles admin-related page routes.
type AdminHandler struct {
	// uptimeProvider reports system uptime for admin pages.
	uptimeProvider UptimeProvider
	// statsManager supplies hardware/process statistics.
	statsManager *stats.StatsManager
	// pmSocketPath is the socket path used to reach the process manager.
	pmSocketPath string
	// pmSecret authenticates requests to the process manager.
	pmSecret string
}
|
||||
|
||||
// NewAdminHandler creates a new AdminHandler
|
||||
func NewAdminHandler(uptimeProvider UptimeProvider, statsManager *stats.StatsManager, pmSocketPath, pmSecret string) *AdminHandler {
|
||||
// If statsManager is nil, create a new one with default settings
|
||||
if statsManager == nil {
|
||||
var err error
|
||||
statsManager, err = stats.NewStatsManagerWithDefaults()
|
||||
if err != nil {
|
||||
// Log the error but continue with nil statsManager
|
||||
fmt.Printf("Error creating StatsManager: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
return &AdminHandler{
|
||||
uptimeProvider: uptimeProvider,
|
||||
statsManager: statsManager,
|
||||
pmSocketPath: pmSocketPath,
|
||||
pmSecret: pmSecret,
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterRoutes registers all admin page routes.
//
// Wiring is order-sensitive: the log handler is constructed first, and the
// legacy in-handler log pages are registered only when that construction
// fails. The root path "/" is redirected to "/admin".
func (h *AdminHandler) RegisterRoutes(app *fiber.App) {
	// Admin routes
	admin := app.Group("/admin")

	// Dashboard
	admin.Get("/", h.getDashboard)

	// Create service handler with the correct socket path and secret
	serviceHandler := handlers.NewServiceHandler(h.pmSocketPath, h.pmSecret)
	// Services routes
	admin.Get("/services", serviceHandler.GetServices)
	admin.Get("/services/data", serviceHandler.GetServicesFragment)
	admin.Post("/services/start", serviceHandler.StartService)
	admin.Post("/services/stop", serviceHandler.StopService)
	admin.Post("/services/restart", serviceHandler.RestartService)
	admin.Post("/services/delete", serviceHandler.DeleteService)
	admin.Get("/services/logs", serviceHandler.GetServiceLogs)

	// System routes
	admin.Get("/system/info", h.getSystemInfo)
	admin.Get("/system/hardware-stats", h.getHardwareStats)

	// Create process handler
	processHandler := handlers.NewProcessHandler(h.statsManager)
	admin.Get("/system/processes", processHandler.GetProcesses)
	admin.Get("/system/processes-data", processHandler.GetProcessesData)

	// Create log handler
	// Ensure log directory exists
	// Using the same shared logs path as process manager
	logDir := filepath.Join(os.TempDir(), "heroagent_logs")
	if err := os.MkdirAll(logDir, 0755); err != nil {
		// Non-fatal: NewLogHandler below will surface the problem.
		fmt.Printf("Error creating log directory: %v\n", err)
	}

	logHandler, err := handlers.NewLogHandler(logDir)
	if err != nil {
		fmt.Printf("Error creating log handler: %v\n", err)
		// Fallback to old implementation if log handler creation failed
		admin.Get("/system/logs", h.getSystemLogs)
		admin.Get("/system/logs-test", h.getSystemLogsTest)
	} else {
		fmt.Printf("Log handler created successfully\n")
		// Use the log handler for log routes
		admin.Get("/system/logs", logHandler.GetLogs)
		// Keep the fragment endpoint for backward compatibility
		// but it now just redirects to the main logs endpoint
		admin.Get("/system/logs-fragment", logHandler.GetLogsFragment)
		admin.Get("/system/logs-test", h.getSystemLogsTest) // Keep the test logs route

		// Log API endpoints
		app.Get("/api/logs", logHandler.GetLogsAPI)
	}

	admin.Get("/system/settings", h.getSystemSettings)

	// OpenRPC routes
	admin.Get("/openrpc", h.getOpenRPCManager)
	admin.Get("/openrpc/vfs", h.getOpenRPCVFS)
	admin.Get("/openrpc/vfs/logs", h.getOpenRPCVFSLogs)

	// Redirect root to admin
	app.Get("/", func(c *fiber.Ctx) error {
		return c.Redirect("/admin")
	})
}
|
||||
|
||||
// getDashboard renders the admin dashboard
|
||||
func (h *AdminHandler) getDashboard(c *fiber.Ctx) error {
|
||||
return c.Render("admin/index", fiber.Map{
|
||||
"title": "Dashboard",
|
||||
})
|
||||
}
|
||||
|
||||
// getSystemInfo renders the system info page
|
||||
func (h *AdminHandler) getSystemInfo(c *fiber.Ctx) error {
|
||||
// Initialize default values
|
||||
cpuInfo := "Unknown"
|
||||
memoryInfo := "Unknown"
|
||||
diskInfo := "Unknown"
|
||||
networkInfo := "Unknown"
|
||||
osInfo := "Unknown"
|
||||
uptimeInfo := "Unknown"
|
||||
|
||||
// Get hardware stats from the StatsManager
|
||||
var hardwareStats map[string]interface{}
|
||||
if h.statsManager != nil {
|
||||
hardwareStats = h.statsManager.GetHardwareStats()
|
||||
} else {
|
||||
// Fallback to direct function call if StatsManager is not available
|
||||
hardwareStats = stats.GetHardwareStats()
|
||||
}
|
||||
|
||||
// Extract the formatted strings - safely handle different return types
|
||||
if cpuVal, ok := hardwareStats["cpu"]; ok {
|
||||
switch v := cpuVal.(type) {
|
||||
case string:
|
||||
cpuInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
if model, ok := v["model"].(string); ok {
|
||||
usage := 0.0
|
||||
if usagePercent, ok := v["usage_percent"].(float64); ok {
|
||||
usage = usagePercent
|
||||
}
|
||||
cpuInfo = fmt.Sprintf("%s (Usage: %.1f%%)", model, usage)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if memVal, ok := hardwareStats["memory"]; ok {
|
||||
switch v := memVal.(type) {
|
||||
case string:
|
||||
memoryInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
total, used := 0.0, 0.0
|
||||
if totalGB, ok := v["total_gb"].(float64); ok {
|
||||
total = totalGB
|
||||
}
|
||||
if usedGB, ok := v["used_gb"].(float64); ok {
|
||||
used = usedGB
|
||||
}
|
||||
usedPercent := 0.0
|
||||
if percent, ok := v["used_percent"].(float64); ok {
|
||||
usedPercent = percent
|
||||
}
|
||||
memoryInfo = fmt.Sprintf("%.1f GB / %.1f GB (%.1f%% used)", used, total, usedPercent)
|
||||
}
|
||||
}
|
||||
|
||||
if diskVal, ok := hardwareStats["disk"]; ok {
|
||||
switch v := diskVal.(type) {
|
||||
case string:
|
||||
diskInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
total, used := 0.0, 0.0
|
||||
if totalGB, ok := v["total_gb"].(float64); ok {
|
||||
total = totalGB
|
||||
}
|
||||
if usedGB, ok := v["used_gb"].(float64); ok {
|
||||
used = usedGB
|
||||
}
|
||||
usedPercent := 0.0
|
||||
if percent, ok := v["used_percent"].(float64); ok {
|
||||
usedPercent = percent
|
||||
}
|
||||
diskInfo = fmt.Sprintf("%.1f GB / %.1f GB (%.1f%% used)", used, total, usedPercent)
|
||||
}
|
||||
}
|
||||
|
||||
if netVal, ok := hardwareStats["network"]; ok {
|
||||
switch v := netVal.(type) {
|
||||
case string:
|
||||
networkInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
var interfaces []string
|
||||
if ifaces, ok := v["interfaces"].([]interface{}); ok {
|
||||
for _, iface := range ifaces {
|
||||
if ifaceMap, ok := iface.(map[string]interface{}); ok {
|
||||
name := ifaceMap["name"].(string)
|
||||
ip := ifaceMap["ip"].(string)
|
||||
interfaces = append(interfaces, fmt.Sprintf("%s: %s", name, ip))
|
||||
}
|
||||
}
|
||||
networkInfo = strings.Join(interfaces, ", ")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get OS info
|
||||
hostInfo, err := host.Info()
|
||||
if err == nil {
|
||||
osInfo = fmt.Sprintf("%s %s (%s)", hostInfo.Platform, hostInfo.PlatformVersion, hostInfo.KernelVersion)
|
||||
}
|
||||
|
||||
// Get uptime
|
||||
if h.uptimeProvider != nil {
|
||||
uptimeInfo = h.uptimeProvider.GetUptime()
|
||||
}
|
||||
|
||||
// Render the template with the system info
|
||||
return c.Render("admin/system/info", fiber.Map{
|
||||
"title": "System Information",
|
||||
"cpuInfo": cpuInfo,
|
||||
"memoryInfo": memoryInfo,
|
||||
"diskInfo": diskInfo,
|
||||
"networkInfo": networkInfo,
|
||||
"osInfo": osInfo,
|
||||
"uptimeInfo": uptimeInfo,
|
||||
})
|
||||
}
|
||||
|
||||
// getSystemLogs renders the system logs page
|
||||
func (h *AdminHandler) getSystemLogs(c *fiber.Ctx) error {
|
||||
return c.Render("admin/system/logs", fiber.Map{
|
||||
"title": "System Logs",
|
||||
})
|
||||
}
|
||||
|
||||
// getSystemLogsTest renders the test logs page
|
||||
func (h *AdminHandler) getSystemLogsTest(c *fiber.Ctx) error {
|
||||
return c.Render("admin/system/logs_test", fiber.Map{
|
||||
"title": "Test Logs",
|
||||
})
|
||||
}
|
||||
|
||||
// getSystemSettings renders the system settings page
|
||||
func (h *AdminHandler) getSystemSettings(c *fiber.Ctx) error {
|
||||
// Get system settings
|
||||
// This is a placeholder - in a real app, you would fetch settings from a database or config file
|
||||
settings := map[string]interface{}{
|
||||
"logLevel": "info",
|
||||
"enableDebugMode": false,
|
||||
"dataDirectory": "/var/lib/heroagent",
|
||||
"maxLogSize": "100MB",
|
||||
}
|
||||
|
||||
return c.Render("admin/system/settings", fiber.Map{
|
||||
"title": "System Settings",
|
||||
"settings": settings,
|
||||
})
|
||||
}
|
||||
|
||||
// getHardwareStats returns only the hardware stats for Unpoly polling
|
||||
func (h *AdminHandler) getHardwareStats(c *fiber.Ctx) error {
|
||||
// Initialize default values
|
||||
cpuInfo := "Unknown"
|
||||
memoryInfo := "Unknown"
|
||||
diskInfo := "Unknown"
|
||||
networkInfo := "Unknown"
|
||||
|
||||
// Get hardware stats from the StatsManager
|
||||
var hardwareStats map[string]interface{}
|
||||
if h.statsManager != nil {
|
||||
hardwareStats = h.statsManager.GetHardwareStats()
|
||||
} else {
|
||||
// Fallback to direct function call if StatsManager is not available
|
||||
hardwareStats = stats.GetHardwareStats()
|
||||
}
|
||||
|
||||
// Extract the formatted strings - safely handle different return types
|
||||
if cpuVal, ok := hardwareStats["cpu"]; ok {
|
||||
switch v := cpuVal.(type) {
|
||||
case string:
|
||||
cpuInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
if model, ok := v["model"].(string); ok {
|
||||
cpuInfo = model
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if memVal, ok := hardwareStats["memory"]; ok {
|
||||
switch v := memVal.(type) {
|
||||
case string:
|
||||
memoryInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
total, used := 0.0, 0.0
|
||||
if totalGB, ok := v["total_gb"].(float64); ok {
|
||||
total = totalGB
|
||||
}
|
||||
if usedGB, ok := v["used_gb"].(float64); ok {
|
||||
used = usedGB
|
||||
}
|
||||
memoryInfo = fmt.Sprintf("%.1f GB / %.1f GB", used, total)
|
||||
}
|
||||
}
|
||||
|
||||
if diskVal, ok := hardwareStats["disk"]; ok {
|
||||
switch v := diskVal.(type) {
|
||||
case string:
|
||||
diskInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
total, used := 0.0, 0.0
|
||||
if totalGB, ok := v["total_gb"].(float64); ok {
|
||||
total = totalGB
|
||||
}
|
||||
if usedGB, ok := v["used_gb"].(float64); ok {
|
||||
used = usedGB
|
||||
}
|
||||
diskInfo = fmt.Sprintf("%.1f GB / %.1f GB", used, total)
|
||||
}
|
||||
}
|
||||
|
||||
if netVal, ok := hardwareStats["network"]; ok {
|
||||
switch v := netVal.(type) {
|
||||
case string:
|
||||
networkInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
var interfaces []string
|
||||
if ifaces, ok := v["interfaces"].([]interface{}); ok {
|
||||
for _, iface := range ifaces {
|
||||
if ifaceMap, ok := iface.(map[string]interface{}); ok {
|
||||
name := ifaceMap["name"].(string)
|
||||
ip := ifaceMap["ip"].(string)
|
||||
interfaces = append(interfaces, fmt.Sprintf("%s: %s", name, ip))
|
||||
}
|
||||
}
|
||||
networkInfo = strings.Join(interfaces, ", ")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Format for display
|
||||
cpuUsage := "0.0%"
|
||||
memUsage := "0.0%"
|
||||
diskUsage := "0.0%"
|
||||
|
||||
// Safely extract usage percentages
|
||||
if cpuVal, ok := hardwareStats["cpu"].(map[string]interface{}); ok {
|
||||
if usagePercent, ok := cpuVal["usage_percent"].(float64); ok {
|
||||
cpuUsage = fmt.Sprintf("%.1f%%", usagePercent)
|
||||
}
|
||||
}
|
||||
|
||||
if memVal, ok := hardwareStats["memory"].(map[string]interface{}); ok {
|
||||
if usedPercent, ok := memVal["used_percent"].(float64); ok {
|
||||
memUsage = fmt.Sprintf("%.1f%%", usedPercent)
|
||||
}
|
||||
}
|
||||
|
||||
if diskVal, ok := hardwareStats["disk"].(map[string]interface{}); ok {
|
||||
if usedPercent, ok := diskVal["used_percent"].(float64); ok {
|
||||
diskUsage = fmt.Sprintf("%.1f%%", usedPercent)
|
||||
}
|
||||
}
|
||||
|
||||
// Render only the hardware stats fragment
|
||||
return c.Render("admin/system/hardware_stats_fragment", fiber.Map{
|
||||
"cpuInfo": cpuInfo,
|
||||
"memoryInfo": memoryInfo,
|
||||
"diskInfo": diskInfo,
|
||||
"networkInfo": networkInfo,
|
||||
"cpuUsage": cpuUsage,
|
||||
"memUsage": memUsage,
|
||||
"diskUsage": diskUsage,
|
||||
})
|
||||
}
|
||||
|
||||
// getProcesses has been moved to the handlers package
|
||||
// See handlers.ProcessHandler.GetProcesses
|
||||
|
||||
// getOpenRPCManager renders the OpenRPC Manager view page
|
||||
func (h *AdminHandler) getOpenRPCManager(c *fiber.Ctx) error {
|
||||
return c.Render("admin/openrpc/index", fiber.Map{
|
||||
"title": "OpenRPC Manager",
|
||||
})
|
||||
}
|
||||
|
||||
// getOpenRPCVFS renders the OpenRPC VFS view page
|
||||
func (h *AdminHandler) getOpenRPCVFS(c *fiber.Ctx) error {
|
||||
return c.Render("admin/openrpc/vfs", fiber.Map{
|
||||
"title": "VFS OpenRPC Interface",
|
||||
})
|
||||
}
|
||||
|
||||
// getOpenRPCVFSLogs renders the OpenRPC logs content for Unpoly or direct
// access.
//
// NOTE(review): responses are currently SIMULATED — no actual OpenRPC call
// is made. The "params" query value is interpolated verbatim into the
// request JSON without validation; confirm that is acceptable before wiring
// this to a real backend.
func (h *AdminHandler) getOpenRPCVFSLogs(c *fiber.Ctx) error {
	// Get query parameters
	method := c.Query("method", "")
	params := c.Query("params", "")

	// Define available methods and their display names
	methods := []string{
		"vfs_ls",
		"vfs_read",
		"vfs_write",
		"vfs_mkdir",
		"vfs_rm",
		"vfs_mv",
		"vfs_cp",
		"vfs_exists",
		"vfs_isdir",
		"vfs_isfile",
	}

	methodDisplayNames := map[string]string{
		"vfs_ls":     "List Directory",
		"vfs_read":   "Read File",
		"vfs_write":  "Write File",
		"vfs_mkdir":  "Create Directory",
		"vfs_rm":     "Remove File/Directory",
		"vfs_mv":     "Move/Rename",
		"vfs_cp":     "Copy",
		"vfs_exists": "Check Exists",
		"vfs_isdir":  "Is Directory",
		"vfs_isfile": "Is File",
	}

	// Generate method options HTML
	methodOptions := generateMethodOptions(methods, methodDisplayNames)

	// Initialize variables
	var requestJSON, responseJSON, responseTime string
	var hasResponse bool

	// If a method is selected, make the OpenRPC call
	if method != "" {
		// Prepare the request
		requestJSON = fmt.Sprintf(`{
  "jsonrpc": "2.0",
  "method": "%s",
  "params": %s,
  "id": 1
}`, method, params)

		// In a real implementation, we would make the actual OpenRPC call here
		// For now, we'll just simulate a response

		// Simulate response time (would be real in production)
		time.Sleep(100 * time.Millisecond)
		responseTime = "100ms"

		// Simulate a response based on the method
		switch method {
		case "vfs_ls":
			responseJSON = `{
  "jsonrpc": "2.0",
  "result": [
    {"name": "file1.txt", "size": 1024, "isDir": false, "modTime": "2023-01-01T12:00:00Z"},
    {"name": "dir1", "size": 0, "isDir": true, "modTime": "2023-01-01T12:00:00Z"}
  ],
  "id": 1
}`
		case "vfs_read":
			responseJSON = `{
  "jsonrpc": "2.0",
  "result": "File content would be here",
  "id": 1
}`
		default:
			responseJSON = `{
  "jsonrpc": "2.0",
  "result": "Operation completed successfully",
  "id": 1
}`
		}

		hasResponse = true
	}

	// Determine if this is an Unpoly request
	isUnpoly := c.Get("X-Up-Target") != ""

	// If it's an Unpoly request, render just the logs fragment
	if isUnpoly {
		return c.Render("admin/openrpc/vfs_logs", fiber.Map{
			"methodOptions":  methodOptions,
			"selectedMethod": method,
			"params":         params,
			"requestJSON":    requestJSON,
			"responseJSON":   responseJSON,
			"responseTime":   responseTime,
			"hasResponse":    hasResponse,
		})
	}

	// Otherwise render the full page
	return c.Render("admin/openrpc/vfs_overview", fiber.Map{
		"title":          "VFS OpenRPC Logs",
		"methodOptions":  methodOptions,
		"selectedMethod": method,
		"params":         params,
		"requestJSON":    requestJSON,
		"responseJSON":   responseJSON,
		"responseTime":   responseTime,
		"hasResponse":    hasResponse,
	})
}
|
||||
|
||||
// generateMethodOptions generates HTML option tags for the method dropdown,
// one per line, using the display name when one is defined and the raw
// method name otherwise.
func generateMethodOptions(methods []string, methodDisplayNames map[string]string) string {
	var b strings.Builder
	for i, m := range methods {
		if i > 0 {
			b.WriteString("\n")
		}
		label, ok := methodDisplayNames[m]
		if !ok {
			label = m
		}
		fmt.Fprintf(&b, `<option value="%s">%s</option>`, m, label)
	}
	return b.String()
}
|
||||
|
||||
// Note: getProcessesData has been consolidated in the API routes file
|
||||
// to avoid duplication and ensure consistent behavior
|
187
_pkg2_dont_use/heroagent/pages/jobs.go
Normal file
187
_pkg2_dont_use/heroagent/pages/jobs.go
Normal file
@@ -0,0 +1,187 @@
|
||||
package pages
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/herojobs"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
// JobDisplayInfo represents information about a job for display purposes.
// All job fields are flattened into strings/int64s for template rendering.
type JobDisplayInfo struct {
	JobID         string `json:"jobid"`   // numeric job ID rendered as a string
	CircleID      string `json:"circleid"`
	Topic         string `json:"topic"`
	Status        string `json:"status"`
	SessionKey    string `json:"sessionkey"`
	Params        string `json:"params"`
	ParamsType    string `json:"paramstype"`
	Result        string `json:"result"`
	Error         string `json:"error"`
	TimeScheduled int64  `json:"time_scheduled"` // timestamps copied verbatim from the job record
	TimeStart     int64  `json:"time_start"`
	TimeEnd       int64  `json:"time_end"`
	Timeout       int64  `json:"timeout"`
}
|
||||
|
||||
// JobHandler handles job-related page routes.
type JobHandler struct {
	client *herojobs.RedisClient // long-lived HeroJobs Redis client (connected in NewJobHandler)
	logger *log.Logger           // destination for listing/fetch warnings
}
|
||||
|
||||
// NewJobHandler creates a new job handler with the provided socket path
|
||||
func NewJobHandler(redisAddr string, logger *log.Logger) (*JobHandler, error) {
|
||||
// Assuming SSL is false as per README example herojobs.NewRedisClient("localhost:6379", false)
|
||||
// This might need to be configurable later.
|
||||
client, err := herojobs.NewRedisClient(redisAddr, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create HeroJobs Redis client: %w", err)
|
||||
}
|
||||
|
||||
return &JobHandler{
|
||||
client: client,
|
||||
logger: logger,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// RegisterRoutes registers job page routes
|
||||
func (h *JobHandler) RegisterRoutes(app *fiber.App) {
|
||||
// Register routes for /jobs
|
||||
jobs := app.Group("/jobs")
|
||||
jobs.Get("/", h.getJobsPage)
|
||||
jobs.Get("/list", h.getJobsList)
|
||||
|
||||
// Register the same routes under /admin/jobs for consistency
|
||||
adminJobs := app.Group("/admin/jobs")
|
||||
adminJobs.Get("/", h.getJobsPage)
|
||||
adminJobs.Get("/list", h.getJobsList)
|
||||
}
|
||||
|
||||
// getJobsPage renders the jobs page
|
||||
func (h *JobHandler) getJobsPage(c *fiber.Ctx) error {
|
||||
// Assuming h.client (RedisClient) is valid if NewJobHandler succeeded.
|
||||
// The client is connected on creation. A Ping method could be used here for a health check if available.
|
||||
// The previous connect/close logic per-request is removed.
|
||||
var warning string // This will be empty unless a new check (e.g., Ping) sets it.
|
||||
return c.Render("admin/jobs", fiber.Map{
|
||||
"title": "Jobs",
|
||||
"warning": warning, // warning will be empty for now
|
||||
"error": "",
|
||||
})
|
||||
}
|
||||
|
||||
// getJobsList returns the jobs list fragment for AJAX updates
|
||||
func (h *JobHandler) getJobsList(c *fiber.Ctx) error {
|
||||
// Get parameters from query
|
||||
circleID := c.Query("circleid", "")
|
||||
topic := c.Query("topic", "")
|
||||
|
||||
// Get jobs
|
||||
jobs, err := h.getJobsData(circleID, topic)
|
||||
if err != nil {
|
||||
h.logger.Printf("Error getting jobs: %v", err)
|
||||
// Return the error in the template
|
||||
return c.Render("admin/jobs_list_fragment", fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to get jobs: %v", err),
|
||||
"jobs": []JobDisplayInfo{},
|
||||
})
|
||||
}
|
||||
|
||||
// Render only the jobs fragment
|
||||
return c.Render("admin/jobs_list_fragment", fiber.Map{
|
||||
"jobs": jobs,
|
||||
})
|
||||
}
|
||||
|
||||
// getJobsData gets job data from the HeroJobs server.
//
// Assumes h.client is already connected (established by NewJobHandler); the
// client is long-lived and must not be closed here.
//
// Behavior by parameters:
//   - both circleID and topic empty: scans a fixed set of default
//     circle/topic combinations and aggregates whatever jobs are found;
//   - exactly one of them empty: returns an empty list (listing needs both);
//   - both set: lists and fetches jobs for that circle/topic.
//
// Per-job fetch errors are logged and skipped rather than aborting the
// whole listing.
func (h *JobHandler) getJobsData(circleID, topic string) ([]JobDisplayInfo, error) {
	// If circleID and topic are not provided, try to list all jobs
	if circleID == "" && topic == "" {
		// Try to get some default jobs
		defaultCircles := []string{"default", "system"}
		defaultTopics := []string{"default", "system"}

		var allJobs []JobDisplayInfo

		// Try each combination; a failed listing is logged and skipped.
		for _, circle := range defaultCircles {
			for _, t := range defaultTopics {
				jobIDs, err := h.client.ListJobs(circle, t)
				if err != nil {
					h.logger.Printf("Could not list jobs for circle=%s, topic=%s: %v", circle, t, err)
					continue
				}

				for _, jobID := range jobIDs {
					job, err := h.client.GetJob(jobID)
					if err != nil {
						h.logger.Printf("Error getting job %s: %v", jobID, err)
						continue
					}

					// Flatten the job record into display form.
					allJobs = append(allJobs, JobDisplayInfo{
						JobID:         fmt.Sprintf("%d", job.JobID),
						CircleID:      job.CircleID,
						Topic:         job.Topic,
						Status:        string(job.Status),
						SessionKey:    job.SessionKey,
						Params:        job.Params,
						ParamsType:    string(job.ParamsType),
						Result:        job.Result,
						Error:         job.Error,
						TimeScheduled: job.TimeScheduled,
						TimeStart:     job.TimeStart,
						TimeEnd:       job.TimeEnd,
						Timeout:       job.Timeout,
					})
				}
			}
		}

		return allJobs, nil
	} else if circleID == "" || topic == "" {
		// If only one of the parameters is provided, we can't list jobs
		return []JobDisplayInfo{}, nil
	}

	// List jobs
	jobIDs, err := h.client.ListJobs(circleID, topic)
	if err != nil {
		return nil, fmt.Errorf("failed to list jobs: %w", err)
	}

	// Get details for each job
	jobsList := make([]JobDisplayInfo, 0, len(jobIDs))
	for _, jobID := range jobIDs {
		job, err := h.client.GetJob(jobID)
		if err != nil {
			h.logger.Printf("Error getting job %s: %v", jobID, err)
			continue
		}

		jobInfo := JobDisplayInfo{
			JobID:         fmt.Sprintf("%d", job.JobID),
			CircleID:      job.CircleID,
			Topic:         job.Topic,
			Status:        string(job.Status),
			SessionKey:    job.SessionKey,
			Params:        job.Params,
			ParamsType:    string(job.ParamsType),
			Result:        job.Result,
			Error:         job.Error,
			TimeScheduled: job.TimeScheduled,
			TimeStart:     job.TimeStart,
			TimeEnd:       job.TimeEnd,
			Timeout:       job.Timeout,
		}
		jobsList = append(jobsList, jobInfo)
	}

	return jobsList, nil
}
|
111
_pkg2_dont_use/heroagent/pages/services.go
Normal file
111
_pkg2_dont_use/heroagent/pages/services.go
Normal file
@@ -0,0 +1,111 @@
|
||||
package pages
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/processmanager/interfaces/openrpc"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
// ServiceHandler handles service-related page routes.
type ServiceHandler struct {
	client *openrpc.Client // OpenRPC client for the process manager socket
	logger *log.Logger     // destination for warnings and debug output
}
|
||||
|
||||
// NewServiceHandler creates a new service handler with the provided socket path and secret
|
||||
func NewServiceHandler(socketPath, secret string, logger *log.Logger) *ServiceHandler {
|
||||
fmt.Printf("DEBUG: Creating new pages.ServiceHandler with socket path: %s and secret: %s\n", socketPath, secret)
|
||||
return &ServiceHandler{
|
||||
client: openrpc.NewClient(socketPath, secret),
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterRoutes registers service page routes
|
||||
func (h *ServiceHandler) RegisterRoutes(app *fiber.App) {
|
||||
services := app.Group("/services")
|
||||
|
||||
// Page routes
|
||||
services.Get("/", h.getServicesPage)
|
||||
services.Get("/data", h.getServicesData)
|
||||
}
|
||||
|
||||
// getServicesPage renders the services page
|
||||
func (h *ServiceHandler) getServicesPage(c *fiber.Ctx) error {
|
||||
// Get processes to display on the initial page load
|
||||
processes, _ := h.getProcessList()
|
||||
|
||||
// Check if we can connect to the process manager
|
||||
var warning string
|
||||
_, err := h.client.ListProcesses("json")
|
||||
if err != nil {
|
||||
warning = "Could not connect to process manager: " + err.Error()
|
||||
h.logger.Printf("Warning: %s", warning)
|
||||
}
|
||||
|
||||
return c.Render("admin/services", fiber.Map{
|
||||
"title": "Services",
|
||||
"processes": processes,
|
||||
"warning": warning,
|
||||
})
|
||||
}
|
||||
|
||||
// getServicesData returns only the services fragment for AJAX updates
|
||||
func (h *ServiceHandler) getServicesData(c *fiber.Ctx) error {
|
||||
// Get processes
|
||||
processes, _ := h.getProcessList()
|
||||
|
||||
// Render only the services fragment
|
||||
return c.Render("admin/services_fragment", fiber.Map{
|
||||
"processes": processes,
|
||||
})
|
||||
}
|
||||
|
||||
// getProcessList gets a list of processes from the process manager.
//
// The OpenRPC result is expected to be a []interface{} of
// map[string]interface{} entries; entries of any other shape are skipped.
// All fields are stringified with %v so missing keys render as "<nil>"
// rather than failing.
func (h *ServiceHandler) getProcessList() ([]ProcessDisplayInfo, error) {
	// Debug: Log the function entry
	h.logger.Printf("Entering getProcessList() function")
	fmt.Printf("DEBUG: getProcessList called using client: %p\n", h.client)

	// Get the list of processes via the client
	result, err := h.client.ListProcesses("json")
	if err != nil {
		h.logger.Printf("Error listing processes: %v", err)
		return nil, err
	}

	// Convert the result to a slice of ProcessStatus
	listResult, ok := result.([]interface{})
	if !ok {
		h.logger.Printf("Error: unexpected result type from ListProcesses")
		return nil, fmt.Errorf("unexpected result type from ListProcesses")
	}

	// Convert to display info format
	displayInfoList := make([]ProcessDisplayInfo, 0, len(listResult))
	for _, item := range listResult {
		procMap, ok := item.(map[string]interface{})
		if !ok {
			// Skip entries that are not objects.
			continue
		}

		// Create a ProcessDisplayInfo from the map
		displayInfo := ProcessDisplayInfo{
			ID:        fmt.Sprintf("%v", procMap["pid"]),
			Name:      fmt.Sprintf("%v", procMap["name"]),
			Status:    fmt.Sprintf("%v", procMap["status"]),
			Uptime:    fmt.Sprintf("%v", procMap["uptime"]),
			StartTime: fmt.Sprintf("%v", procMap["start_time"]),
			CPU:       fmt.Sprintf("%v%%", procMap["cpu"]),
			Memory:    fmt.Sprintf("%v MB", procMap["memory"]),
		}
		displayInfoList = append(displayInfoList, displayInfo)
	}

	// Debug: Log the number of processes
	h.logger.Printf("Found %d processes", len(displayInfoList))

	return displayInfoList, nil
}
|
54
_pkg2_dont_use/heroagent/pages/types.go
Normal file
54
_pkg2_dont_use/heroagent/pages/types.go
Normal file
@@ -0,0 +1,54 @@
|
||||
package pages
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/processmanager"
|
||||
)
|
||||
|
||||
// ProcessDisplayInfo represents information about a process for display
// purposes. All fields are preformatted strings ready for templates.
type ProcessDisplayInfo struct {
	ID        string `json:"id"`         // process ID as a string
	Name      string `json:"name"`
	Status    string `json:"status"`
	Uptime    string `json:"uptime"`     // human-readable, see formatUptime
	StartTime string `json:"start_time"` // "2006-01-02 15:04:05" layout
	CPU       string `json:"cpu"`        // e.g. "1.25%"
	Memory    string `json:"memory"`     // e.g. "12.50 MB"
}
|
||||
|
||||
// ConvertToDisplayInfo converts a ProcessInfo from the processmanager package to ProcessDisplayInfo
|
||||
func ConvertToDisplayInfo(info *processmanager.ProcessInfo) ProcessDisplayInfo {
|
||||
// Calculate uptime from start time
|
||||
uptime := formatUptime(time.Since(info.StartTime))
|
||||
|
||||
return ProcessDisplayInfo{
|
||||
ID: fmt.Sprintf("%d", info.PID),
|
||||
Name: info.Name,
|
||||
Status: string(info.Status),
|
||||
Uptime: uptime,
|
||||
StartTime: info.StartTime.Format("2006-01-02 15:04:05"),
|
||||
CPU: fmt.Sprintf("%.2f%%", info.CPUPercent),
|
||||
Memory: fmt.Sprintf("%.2f MB", info.MemoryMB),
|
||||
}
|
||||
}
|
||||
|
||||
// formatUptime formats a duration as a human-readable uptime string,
// keeping only the two most significant units.
func formatUptime(duration time.Duration) string {
	total := int(duration.Seconds())
	d := total / (24 * 3600)
	hr := (total % (24 * 3600)) / 3600
	min := (total % 3600) / 60
	sec := total % 60

	switch {
	case d > 0:
		return fmt.Sprintf("%d days, %d hours", d, hr)
	case hr > 0:
		return fmt.Sprintf("%d hours, %d minutes", hr, min)
	case min > 0:
		return fmt.Sprintf("%d minutes, %d seconds", min, sec)
	default:
		return fmt.Sprintf("%d seconds", sec)
	}
}
|
132
_pkg2_dont_use/heroagent/web/endpoint_test.go
Normal file
132
_pkg2_dont_use/heroagent/web/endpoint_test.go
Normal file
@@ -0,0 +1,132 @@
|
||||
package web
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestConfig holds configuration for the tests.
type TestConfig struct {
	BaseURL string        // base URL of the running server under test
	Timeout time.Duration // per-request HTTP client timeout
}
|
||||
|
||||
// NewTestConfig creates a new test configuration
|
||||
func NewTestConfig() *TestConfig {
|
||||
return &TestConfig{
|
||||
BaseURL: "http://localhost:9021",
|
||||
Timeout: 5 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
// testEndpoint tests a single endpoint
|
||||
func testEndpoint(t *testing.T, config *TestConfig, method, path string, expectedStatus int, formData map[string]string) {
|
||||
t.Helper()
|
||||
|
||||
client := &http.Client{
|
||||
Timeout: config.Timeout,
|
||||
}
|
||||
|
||||
var req *http.Request
|
||||
var err error
|
||||
|
||||
fullURL := config.BaseURL + path
|
||||
|
||||
if method == "GET" {
|
||||
req, err = http.NewRequest(method, fullURL, nil)
|
||||
} else if method == "POST" {
|
||||
if formData != nil {
|
||||
form := make(url.Values)
|
||||
for key, value := range formData {
|
||||
form.Add(key, value)
|
||||
}
|
||||
req, err = http.NewRequest(method, fullURL, strings.NewReader(form.Encode()))
|
||||
if err == nil {
|
||||
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
|
||||
}
|
||||
} else {
|
||||
req, err = http.NewRequest(method, fullURL, nil)
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create request: %v", err)
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make request: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != expectedStatus {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
t.Errorf("Expected status %d for %s %s, got %d. Response: %s",
|
||||
expectedStatus, method, path, resp.StatusCode, string(body))
|
||||
} else {
|
||||
t.Logf("✅ %s %s - Status: %d", method, path, resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetEndpoints tests all GET endpoints
|
||||
func TestGetEndpoints(t *testing.T) {
|
||||
config := NewTestConfig()
|
||||
|
||||
// All endpoints to test
|
||||
getEndpoints := []string{
|
||||
"/", // Root redirect to admin
|
||||
"/admin", // Admin dashboard
|
||||
"/admin/system/info", // System info page
|
||||
"/admin/services", // Services page
|
||||
"/admin/system/processes", // Processes page
|
||||
"/admin/system/logs", // System logs page
|
||||
"/admin/system/settings", // System settings page
|
||||
}
|
||||
|
||||
// Test all endpoints
|
||||
for _, endpoint := range getEndpoints {
|
||||
t.Run(fmt.Sprintf("GET %s", endpoint), func(t *testing.T) {
|
||||
testEndpoint(t, config, "GET", endpoint, http.StatusOK, nil)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestAPIEndpoints tests all API endpoints
|
||||
func TestAPIEndpoints(t *testing.T) {
|
||||
t.Skip("API endpoints need to be fixed")
|
||||
config := NewTestConfig()
|
||||
|
||||
apiEndpoints := []string{
|
||||
"/admin/api/hardware-stats", // Hardware stats API
|
||||
"/admin/api/process-stats", // Process stats API
|
||||
}
|
||||
|
||||
for _, endpoint := range apiEndpoints {
|
||||
t.Run(fmt.Sprintf("GET %s", endpoint), func(t *testing.T) {
|
||||
testEndpoint(t, config, "GET", endpoint, http.StatusOK, nil)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestFragmentEndpoints tests all fragment endpoints used for AJAX updates
|
||||
func TestFragmentEndpoints(t *testing.T) {
|
||||
config := NewTestConfig()
|
||||
|
||||
// All fragment endpoints to test
|
||||
fragmentEndpoints := []string{
|
||||
"/admin/system/hardware-stats", // Hardware stats fragment
|
||||
"/admin/system/processes-data", // Processes data fragment
|
||||
}
|
||||
|
||||
// Test all fragment endpoints
|
||||
for _, endpoint := range fragmentEndpoints {
|
||||
t.Run(fmt.Sprintf("GET %s", endpoint), func(t *testing.T) {
|
||||
testEndpoint(t, config, "GET", endpoint, http.StatusOK, nil)
|
||||
})
|
||||
}
|
||||
}
|
739
_pkg2_dont_use/heroagent/web/static/css/admin.css
Normal file
739
_pkg2_dont_use/heroagent/web/static/css/admin.css
Normal file
@@ -0,0 +1,739 @@
|
||||
/* Admin Dashboard Styles */
|
||||
|
||||
/* Base Font Size and Typography */
|
||||
:root {
|
||||
--pico-font-size: 16px;
|
||||
--pico-font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif;
|
||||
--pico-line-height: 1.5;
|
||||
}
|
||||
|
||||
html {
|
||||
font-size: 100%;
|
||||
font-family: var(--pico-font-family);
|
||||
line-height: var(--pico-line-height);
|
||||
}
|
||||
|
||||
/* Layout */
|
||||
body {
|
||||
display: grid;
|
||||
grid-template-columns: 300px 1fr;
|
||||
grid-template-rows: 60px 1fr;
|
||||
grid-template-areas:
|
||||
"header header"
|
||||
"sidebar main";
|
||||
min-height: 100vh;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
overflow: hidden;
|
||||
gap: 0;
|
||||
}
|
||||
|
||||
/* Header - Documentation Style */
|
||||
header {
|
||||
grid-area: header;
|
||||
padding: 0 2rem;
|
||||
border-bottom: 1px solid rgba(255, 255, 255, 0.1);
|
||||
background-color: #1a1f2b;
|
||||
position: sticky;
|
||||
top: 0;
|
||||
z-index: 100;
|
||||
box-shadow: 0 1px 3px rgba(0,0,0,0.2);
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
height: 60px;
|
||||
}
|
||||
|
||||
.top-nav {
|
||||
display: flex;
|
||||
justify-content: flex-start;
|
||||
align-items: center;
|
||||
margin: 0 auto;
|
||||
width: 100%;
|
||||
height: 60px;
|
||||
}
|
||||
|
||||
.top-nav .brand {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.5rem;
|
||||
font-weight: bold;
|
||||
font-size: 1.2rem;
|
||||
}
|
||||
|
||||
.top-nav .brand a {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.5rem;
|
||||
text-decoration: none;
|
||||
color: #00a8ff;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.brand-icon {
|
||||
width: 24px;
|
||||
height: 24px;
|
||||
filter: drop-shadow(0 0 2px rgba(0, 168, 255, 0.5));
|
||||
}
|
||||
|
||||
/* Documentation-style navigation */
|
||||
.nav-links {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 2rem;
|
||||
margin-left: 2rem;
|
||||
}
|
||||
|
||||
.nav-link {
|
||||
text-decoration: none;
|
||||
color: var(--pico-muted-color);
|
||||
font-weight: 500;
|
||||
padding: 0.5rem 0;
|
||||
position: relative;
|
||||
transition: color 0.2s ease;
|
||||
}
|
||||
|
||||
.nav-link:hover, .nav-link.active {
|
||||
color: var(--pico-primary);
|
||||
}
|
||||
|
||||
.nav-link.active::after {
|
||||
content: '';
|
||||
position: absolute;
|
||||
bottom: -0.8rem;
|
||||
left: 0;
|
||||
width: 100%;
|
||||
height: 2px;
|
||||
background-color: var(--pico-primary);
|
||||
}
|
||||
|
||||
.nav-right {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 1rem;
|
||||
margin-left: auto;
|
||||
}
|
||||
|
||||
.search-box {
|
||||
width: auto !important;
|
||||
margin: auto !important;
|
||||
}
|
||||
|
||||
/* Sidebar */
|
||||
.sidebar {
|
||||
grid-area: sidebar;
|
||||
background-color: #1a1f2b;
|
||||
border-right: 1px solid rgba(255, 255, 255, 0.1);
|
||||
padding: 0;
|
||||
overflow-y: auto;
|
||||
height: calc(100vh - 60px);
|
||||
position: fixed;
|
||||
top: 60px;
|
||||
left: 0;
|
||||
width: 300px;
|
||||
color: #c5d0e6;
|
||||
z-index: 100;
|
||||
font-family: var(--pico-font-family);
|
||||
font-size: var(--pico-font-size);
|
||||
margin-top: 0;
|
||||
}
|
||||
|
||||
.sidebar-content {
|
||||
padding: 1rem 0;
|
||||
display: block;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
/* Sidebar Navigation */
|
||||
.sidebar-wrapper {
|
||||
width: 100%;
|
||||
padding: 10px 0px;
|
||||
}
|
||||
|
||||
.sidebar-nav {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.sidebar-section {
|
||||
margin-bottom: 0.5rem;
|
||||
}
|
||||
|
||||
/* Collapsible sidebar sections */
|
||||
.sidebar-heading.toggle {
|
||||
cursor: pointer;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.sidebar-heading.toggle::after {
|
||||
content: '▼';
|
||||
font-size: 10px;
|
||||
position: absolute;
|
||||
right: 1rem;
|
||||
top: 50%;
|
||||
transform: translateY(-50%);
|
||||
transition: transform 0.2s ease;
|
||||
}
|
||||
|
||||
.sidebar-section.collapsed .sidebar-heading.toggle::after {
|
||||
transform: translateY(-50%) rotate(-90deg);
|
||||
}
|
||||
|
||||
.sidebar-section.collapsed .sidebar-content-section {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.sidebar-heading {
|
||||
font-size: var(--pico-font-size);
|
||||
font-weight: 600;
|
||||
color: #8c9db5;
|
||||
padding: 0.25rem 1.25rem;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 0.05em;
|
||||
}
|
||||
|
||||
.sidebar-link {
|
||||
display: block;
|
||||
padding: 0.35rem 1.25rem;
|
||||
color: #a3b3cc;
|
||||
text-decoration: none;
|
||||
font-size: var(--pico-font-size);
|
||||
border-left: 3px solid transparent;
|
||||
transition: all 0.2s ease;
|
||||
}
|
||||
|
||||
.sidebar-link.child {
|
||||
padding-left: 2.5rem;
|
||||
}
|
||||
|
||||
.sidebar-link:hover {
|
||||
color: #00a8ff;
|
||||
background-color: rgba(0, 168, 255, 0.05);
|
||||
}
|
||||
|
||||
.sidebar-link.active {
|
||||
color: #00a8ff;
|
||||
background-color: rgba(0, 168, 255, 0.1);
|
||||
border-left-color: #00a8ff;
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
/* Vertical menu styling */
|
||||
.sidebar-menu {
|
||||
list-style: none;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
display: block;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.menu-item {
|
||||
display: block;
|
||||
width: 100%;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
.menu-link {
|
||||
display: block;
|
||||
width: 100%;
|
||||
padding: 0.75rem 1.25rem;
|
||||
color: #a3b3cc;
|
||||
text-decoration: none;
|
||||
font-size: 0.9rem;
|
||||
border-left: 3px solid transparent;
|
||||
transition: all 0.2s ease;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
.menu-link:hover {
|
||||
color: #00a8ff;
|
||||
background-color: rgba(0, 168, 255, 0.05);
|
||||
}
|
||||
|
||||
.menu-link.active {
|
||||
color: #00a8ff;
|
||||
background-color: rgba(0, 168, 255, 0.1);
|
||||
border-left-color: #00a8ff;
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
/* Submenu styling */
|
||||
.has-submenu > .menu-link {
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.has-submenu > .menu-link:after {
|
||||
content: '▼';
|
||||
font-size: 0.6rem;
|
||||
position: absolute;
|
||||
right: 1rem;
|
||||
top: 50%;
|
||||
transform: translateY(-50%);
|
||||
transition: transform 0.2s ease;
|
||||
}
|
||||
|
||||
.has-submenu.open > .menu-link:after {
|
||||
transform: translateY(-50%) rotate(180deg);
|
||||
}
|
||||
|
||||
.submenu {
|
||||
list-style: none;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
max-height: 0;
|
||||
overflow: hidden;
|
||||
transition: max-height 0.3s ease;
|
||||
display: block;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.has-submenu.open > .submenu {
|
||||
max-height: 500px;
|
||||
}
|
||||
|
||||
.submenu .menu-item {
|
||||
display: block;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.submenu .menu-link {
|
||||
padding-left: 2.5rem;
|
||||
font-size: 0.85rem;
|
||||
}
|
||||
|
||||
/* Main Content */
|
||||
main {
|
||||
grid-area: main;
|
||||
padding: 0;
|
||||
overflow-y: auto;
|
||||
margin-top: 0;
|
||||
font-family: var(--pico-font-family);
|
||||
font-size: var(--pico-font-size);
|
||||
line-height: var(--pico-line-height);
|
||||
color: #c5d0e6;
|
||||
background-color: #1a1f2b;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
/* Content Section */
|
||||
.content-section {
|
||||
padding: 0;
|
||||
margin-top: 0;
|
||||
}
|
||||
|
||||
/* Services Page */
|
||||
.services-page {
|
||||
padding: 0;
|
||||
margin-top: -60px;
|
||||
}
|
||||
|
||||
/* Removed section-header styling as it's not needed */
|
||||
|
||||
.section-title {
|
||||
font-size: 1.2rem;
|
||||
font-weight: 600;
|
||||
margin-bottom: 0.1rem;
|
||||
margin-top: 0;
|
||||
color: #e0e6f0;
|
||||
padding-top: 0;
|
||||
padding-left: 1.25rem;
|
||||
}
|
||||
|
||||
.section-description {
|
||||
font-size: 0.85rem;
|
||||
color: #8c9db5;
|
||||
margin-bottom: 0.25rem;
|
||||
padding-left: 1.25rem;
|
||||
}
|
||||
|
||||
/* Typography consistency */
|
||||
h1, h2, h3, h4, h5, h6 {
|
||||
font-family: var(--pico-font-family);
|
||||
line-height: 1.2;
|
||||
margin-bottom: 1rem;
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
h1 { font-size: 2rem; }
|
||||
h2 { font-size: 1.75rem; }
|
||||
h3 { font-size: 1.5rem; }
|
||||
h4 { font-size: 1.25rem; }
|
||||
h5 { font-size: 1.1rem; }
|
||||
h6 { font-size: 1rem; }
|
||||
|
||||
p, ul, ol, dl, table {
|
||||
font-size: var(--pico-font-size);
|
||||
line-height: var(--pico-line-height);
|
||||
margin-bottom: 1rem;
|
||||
}
|
||||
|
||||
/* Cards and panels */
|
||||
.card, .panel {
|
||||
font-size: var(--pico-font-size);
|
||||
line-height: var(--pico-line-height);
|
||||
background-color: #232836;
|
||||
border-radius: 8px;
|
||||
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.2);
|
||||
padding: 0.75rem;
|
||||
margin-bottom: 0.5rem;
|
||||
height: fit-content;
|
||||
}
|
||||
|
||||
.card-title, .panel-title {
|
||||
font-size: 1rem;
|
||||
font-weight: 600;
|
||||
margin-bottom: 0.5rem;
|
||||
color: #e0e6f0;
|
||||
padding-bottom: 0.35rem;
|
||||
}
|
||||
|
||||
/* Tables */
|
||||
table {
|
||||
font-size: 0.9rem;
|
||||
width: 100%;
|
||||
border-collapse: separate;
|
||||
border-spacing: 0;
|
||||
margin-bottom: 0.5rem;
|
||||
}
|
||||
|
||||
th {
|
||||
font-weight: 600;
|
||||
text-align: left;
|
||||
padding: 0.5rem 0.75rem;
|
||||
border-bottom: 1px solid rgba(255, 255, 255, 0.1);
|
||||
color: #8c9db5;
|
||||
font-size: 0.85rem;
|
||||
text-transform: uppercase;
|
||||
}
|
||||
|
||||
td {
|
||||
padding: 0.5rem 0.75rem;
|
||||
border-bottom: 1px solid rgba(255, 255, 255, 0.05);
|
||||
color: #c5d0e6;
|
||||
}
|
||||
|
||||
tr:hover td {
|
||||
background-color: rgba(0, 168, 255, 0.05);
|
||||
}
|
||||
|
||||
/* Forms */
|
||||
input, select, textarea, button {
|
||||
font-family: var(--pico-font-family);
|
||||
font-size: var(--pico-font-size);
|
||||
background-color: #2a303e;
|
||||
border: 1px solid rgba(255, 255, 255, 0.1);
|
||||
border-radius: 4px;
|
||||
padding: 0.5rem 0.75rem;
|
||||
color: #c5d0e6;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.form-group {
|
||||
margin-bottom: 1.25rem;
|
||||
}
|
||||
|
||||
.form-group label {
|
||||
display: block;
|
||||
margin-bottom: 0.5rem;
|
||||
color: #8c9db5;
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
fieldset {
|
||||
border: 1px solid rgba(255, 255, 255, 0.1);
|
||||
border-radius: 4px;
|
||||
padding: 1rem;
|
||||
margin-bottom: 1.25rem;
|
||||
}
|
||||
|
||||
legend {
|
||||
padding: 0 0.5rem;
|
||||
color: #8c9db5;
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
button, .button {
|
||||
background-color: #00a8ff;
|
||||
color: #fff;
|
||||
border: none;
|
||||
border-radius: 4px;
|
||||
padding: 0.4rem 0.75rem;
|
||||
cursor: pointer;
|
||||
transition: background-color 0.2s ease;
|
||||
width: auto;
|
||||
font-size: 0.85rem;
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
.button-group {
|
||||
display: flex;
|
||||
gap: 0.5rem;
|
||||
flex-wrap: wrap;
|
||||
}
|
||||
|
||||
button:hover, .button:hover {
|
||||
background-color: #0090e0;
|
||||
}
|
||||
|
||||
button.secondary, .button.secondary {
|
||||
background-color: #2a303e;
|
||||
border: 1px solid rgba(255, 255, 255, 0.1);
|
||||
color: #a3b3cc;
|
||||
}
|
||||
|
||||
button.secondary:hover, .button.secondary:hover {
|
||||
background-color: #343d4f;
|
||||
}
|
||||
|
||||
button.danger, .button.danger {
|
||||
background-color: #e53935;
|
||||
color: #fff;
|
||||
}
|
||||
|
||||
.button-group button.danger,
|
||||
.button-group .button.danger {
|
||||
background-color: #e53935;
|
||||
color: #fff;
|
||||
}
|
||||
|
||||
button.danger:hover, .button.danger:hover,
|
||||
.button-group button.danger:hover,
|
||||
.button-group .button.danger:hover {
|
||||
background-color: #c62828;
|
||||
}
|
||||
|
||||
/* Section layouts */
|
||||
.content-section {
|
||||
margin-bottom: 0.5rem;
|
||||
}
|
||||
|
||||
/* Removed duplicate section-title definition */
|
||||
|
||||
.section-description {
|
||||
color: #8c9db5;
|
||||
margin-bottom: 1rem;
|
||||
}
|
||||
|
||||
/* Grid layouts */
|
||||
.grid-container {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(auto-fill, minmax(300px, 1fr));
|
||||
gap: 1.5rem;
|
||||
margin-bottom: 2rem;
|
||||
}
|
||||
|
||||
/* Two-column layout */
|
||||
.two-column-layout {
|
||||
display: grid;
|
||||
grid-template-columns: 2fr 1fr;
|
||||
gap: 0.75rem;
|
||||
align-items: start;
|
||||
margin-top: 0.25rem;
|
||||
padding: 0 1.25rem;
|
||||
}
|
||||
|
||||
/* Badges */
|
||||
.badge {
|
||||
display: inline-block;
|
||||
padding: 0.2rem 0.5rem;
|
||||
border-radius: 4px;
|
||||
font-size: 0.75rem;
|
||||
font-weight: 500;
|
||||
text-align: center;
|
||||
letter-spacing: 0.02em;
|
||||
}
|
||||
|
||||
.badge.success {
|
||||
background-color: rgba(38, 194, 129, 0.15);
|
||||
color: #26c281;
|
||||
border: 1px solid rgba(38, 194, 129, 0.3);
|
||||
}
|
||||
|
||||
.badge.warning {
|
||||
background-color: rgba(255, 168, 0, 0.15);
|
||||
color: #ffa800;
|
||||
border: 1px solid rgba(255, 168, 0, 0.3);
|
||||
}
|
||||
|
||||
.badge.danger {
|
||||
background-color: rgba(255, 76, 76, 0.15);
|
||||
color: #ff4c4c;
|
||||
border: 1px solid rgba(255, 76, 76, 0.3);
|
||||
}
|
||||
|
||||
/* Log Panel */
|
||||
.log-panel {
|
||||
position: fixed;
|
||||
right: 0;
|
||||
top: 60px;
|
||||
width: 400px;
|
||||
height: calc(100vh - 60px);
|
||||
background-color: var(--pico-card-background-color);
|
||||
border-left: 1px solid var(--pico-muted-border-color);
|
||||
padding: 1rem;
|
||||
transform: translateX(100%);
|
||||
transition: transform 0.3s ease;
|
||||
z-index: 90;
|
||||
overflow-y: auto;
|
||||
}
|
||||
|
||||
.log-panel.open {
|
||||
transform: translateX(0);
|
||||
}
|
||||
|
||||
.log-toggle {
|
||||
position: fixed;
|
||||
right: 1rem;
|
||||
bottom: 1rem;
|
||||
z-index: 100;
|
||||
}
|
||||
|
||||
.log-content {
|
||||
font-family: monospace;
|
||||
white-space: pre-wrap;
|
||||
font-size: 0.85rem;
|
||||
background-color: var(--pico-code-background-color);
|
||||
padding: 1rem;
|
||||
border-radius: var(--pico-border-radius);
|
||||
height: calc(100% - 3rem);
|
||||
overflow-y: auto;
|
||||
}
|
||||
|
||||
/* Responsive adjustments */
|
||||
@media (max-width: 768px) {
|
||||
body {
|
||||
grid-template-columns: 1fr;
|
||||
grid-template-areas:
|
||||
"header"
|
||||
"main";
|
||||
}
|
||||
|
||||
.sidebar {
|
||||
position: fixed;
|
||||
left: 0;
|
||||
top: 60px;
|
||||
width: 250px;
|
||||
transform: translateX(-100%);
|
||||
transition: transform 0.3s ease;
|
||||
z-index: 95;
|
||||
}
|
||||
|
||||
.sidebar.open {
|
||||
transform: translateX(0);
|
||||
}
|
||||
|
||||
.menu-toggle {
|
||||
display: block !important;
|
||||
}
|
||||
}
|
||||
|
||||
@media (min-width: 769px) {
|
||||
.menu-toggle {
|
||||
display: none !important;
|
||||
}
|
||||
}
|
||||
|
||||
/* Log Level Styles */
|
||||
.log-info {
|
||||
background-color: rgba(13, 110, 253, 0.15);
|
||||
color: #0d6efd;
|
||||
border-radius: 4px;
|
||||
padding: 2px 6px;
|
||||
font-weight: 500;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.log-warning {
|
||||
background-color: rgba(255, 193, 7, 0.15);
|
||||
color: #ffc107;
|
||||
border-radius: 4px;
|
||||
padding: 2px 6px;
|
||||
font-weight: 500;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.log-error {
|
||||
background-color: rgba(220, 53, 69, 0.15);
|
||||
color: #dc3545;
|
||||
border-radius: 4px;
|
||||
padding: 2px 6px;
|
||||
font-weight: 500;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.log-debug {
|
||||
background-color: rgba(108, 117, 125, 0.15);
|
||||
color: #6c757d;
|
||||
border-radius: 4px;
|
||||
padding: 2px 6px;
|
||||
font-weight: 500;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
/* Log Page Specific Styles */
|
||||
.flex-container {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
margin-bottom: 1rem;
|
||||
}
|
||||
|
||||
.filter-controls {
|
||||
margin-bottom: 1.5rem;
|
||||
}
|
||||
|
||||
.filter-grid {
|
||||
grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
|
||||
gap: 1rem;
|
||||
align-items: end;
|
||||
}
|
||||
|
||||
.filter-item {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
.filter-button {
|
||||
display: flex;
|
||||
align-items: flex-end;
|
||||
}
|
||||
|
||||
.filter-apply {
|
||||
width: 100%;
|
||||
margin-top: 0.5rem;
|
||||
padding: 0.6rem 1rem;
|
||||
}
|
||||
|
||||
/* Pagination improvements */
|
||||
.pagination {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
padding: 1rem 0;
|
||||
margin-top: 1rem;
|
||||
border-top: 1px solid rgba(255, 255, 255, 0.1);
|
||||
}
|
||||
|
||||
.pagination-info {
|
||||
font-size: 0.9rem;
|
||||
color: #8c9db5;
|
||||
}
|
||||
|
||||
.pagination-controls {
|
||||
display: flex;
|
||||
gap: 0.75rem;
|
||||
}
|
||||
|
||||
.pagination-btn {
|
||||
min-width: 100px;
|
||||
text-align: center;
|
||||
padding: 0.5rem 1rem;
|
||||
}
|
||||
|
||||
/* Utility classes */
|
||||
.hidden {
|
||||
display: none !important;
|
||||
}
|
76
_pkg2_dont_use/heroagent/web/static/css/jobs.css
Normal file
76
_pkg2_dont_use/heroagent/web/static/css/jobs.css
Normal file
@@ -0,0 +1,76 @@
|
||||
/* Jobs page styles */

/* Pill-shaped label showing a job's lifecycle state. */
.status-badge {
    display: inline-block;
    padding: 4px 8px;
    border-radius: 4px;
    font-size: 0.85em;
    font-weight: 500;
    text-transform: uppercase;
}

/* Per-state colour schemes: light background with a darker matching text colour. */
.status-pending {
    background-color: #f0f0f0;
    color: #666;
}

.status-running {
    background-color: #e3f2fd;
    color: #0d47a1;
}

.status-completed {
    background-color: #e8f5e9;
    color: #1b5e20;
}

.status-failed {
    background-color: #ffebee;
    color: #b71c1c;
}

.status-scheduled {
    background-color: #fff8e1;
    color: #ff6f00;
}

.status-canceled {
    background-color: #ede7f6;
    color: #4527a0;
}

/* Form styles */
#jobs-filter-form {
    margin-bottom: 20px;
}

/* Table styles */
.table {
    width: 100%;
    border-collapse: collapse;
}

.table th,
.table td {
    padding: 10px;
    text-align: left;
    border-bottom: 1px solid #eee;
}

.table th {
    font-weight: 600;
    background-color: #f9f9f9;
}

/* Row hover highlight for easier scanning. */
.table tr:hover {
    background-color: #f5f5f5;
}

.text-center {
    text-align: center;
}

/* Compact button variant used in table action cells. */
.button-small {
    padding: 4px 8px;
    font-size: 0.85em;
}
|
99
_pkg2_dont_use/heroagent/web/static/css/logs.css
Normal file
99
_pkg2_dont_use/heroagent/web/static/css/logs.css
Normal file
@@ -0,0 +1,99 @@
|
||||
/* Styles for the logs page */

.log-container {
    margin-top: 1.5rem;
    border-radius: 8px;
    overflow: hidden;
}

/* Scrollable table viewport; capped height keeps the page compact. */
.log-table {
    overflow-x: auto;
    max-height: 600px;
    overflow-y: auto;
}

.log-table table {
    width: 100%;
    border-collapse: collapse;
}

/* Sticky header so column names stay visible while scrolling. */
.log-table th {
    position: sticky;
    top: 0;
    background-color: var(--card-background-color);
    z-index: 10;
    padding: 0.75rem;
    text-align: left;
    font-weight: 600;
    border-bottom: 1px solid var(--card-border-color);
}

/* Log rows use a monospace face for alignment of raw log text. */
.log-table td {
    padding: 0.5rem 0.75rem;
    border-bottom: 1px solid var(--card-border-color);
    font-family: var(--font-family-monospace);
    font-size: 0.9rem;
}

/* Log level styles */
.log-info {
    color: var(--primary);
    font-weight: 500;
}

.log-warning {
    color: var(--warning);
    font-weight: 500;
}

.log-error {
    color: var(--danger);
    font-weight: 500;
}

/* Filter controls */
.filter-controls {
    background-color: var(--card-background-color);
    border-radius: 8px;
    padding: 1rem;
    margin-bottom: 1.5rem;
    border: 1px solid var(--card-border-color);
}

/* Responsive filter layout: columns collapse below 200px each. */
.filter-grid {
    grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
    gap: 1rem;
    align-items: end;
}

.filter-button {
    display: flex;
    align-items: flex-end;
}

/* Pagination */
.pagination {
    display: flex;
    justify-content: space-between;
    align-items: center;
    padding: 1rem 0;
    margin-top: 1rem;
}

.pagination-controls {
    display: flex;
    gap: 0.5rem;
}

.pagination-btn {
    padding: 0.25rem 0.75rem;
}

/* Loading indicator */
.loading-indicator {
    display: flex;
    justify-content: center;
    align-items: center;
    height: 200px;
    color: var(--muted-color);
}
|
4
_pkg2_dont_use/heroagent/web/static/css/pico.min.css
vendored
Normal file
4
_pkg2_dont_use/heroagent/web/static/css/pico.min.css
vendored
Normal file
File diff suppressed because one or more lines are too long
6
_pkg2_dont_use/heroagent/web/static/css/unpoly.min.css
vendored
Normal file
6
_pkg2_dont_use/heroagent/web/static/css/unpoly.min.css
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
[hidden][hidden]{display:none !important}
|
||||
up-wrapper{display:inline-block}
|
||||
up-bounds{position:absolute}.up-focus-hidden:focus-visible{outline-color:rgba(0,0,0,0) !important;outline-style:none !important}body.up-scrollbar-away{padding-right:calc(var(--up-scrollbar-width) + var(--up-original-padding-right)) !important}body.up-scrollbar-away,html:has(>body.up-scrollbar-away){overflow-y:hidden !important}body.up-scrollbar-away .up-scrollbar-away{right:calc(var(--up-scrollbar-width) + var(--up-original-right)) !important}
|
||||
.up-request-loader{display:none}up-progress-bar{position:fixed;top:0;left:0;z-index:999999999;height:3px;background-color:#007bff}
|
||||
up-focus-trap{position:fixed;top:0;left:0;width:0;height:0}up-cover-viewport,up-drawer-viewport,up-modal-viewport,up-drawer-backdrop,up-modal-backdrop,up-cover,up-drawer,up-modal{top:0;left:0;bottom:0;right:0}up-drawer-box,up-modal-box{box-shadow:0 0 10px 1px rgba(0,0,0,.3)}up-popup{box-shadow:0 0 4px rgba(0,0,0,.3)}up-popup:focus,up-cover-box:focus,up-drawer-box:focus,up-modal-box:focus,up-cover:focus,up-drawer:focus,up-modal:focus,up-popup:focus-visible,up-cover-box:focus-visible,up-drawer-box:focus-visible,up-modal-box:focus-visible,up-cover:focus-visible,up-drawer:focus-visible,up-modal:focus-visible{outline:none}up-cover,up-drawer,up-modal{z-index:2000;position:fixed}up-drawer-backdrop,up-modal-backdrop{position:absolute;background:rgba(0,0,0,.4)}up-cover-viewport,up-drawer-viewport,up-modal-viewport{position:absolute;overflow-y:scroll;overflow-x:hidden;overscroll-behavior:contain;display:flex;align-items:flex-start;justify-content:center}up-popup,up-cover-box,up-drawer-box,up-modal-box{position:relative;box-sizing:border-box;max-width:100%;background-color:#fff;padding:20px;overflow-x:hidden}up-popup-content,up-cover-content,up-drawer-content,up-modal-content{display:block}up-popup{z-index:1000}up-popup-dismiss,up-cover-dismiss,up-drawer-dismiss,up-modal-dismiss{color:#888;position:absolute;top:10px;right:10px;font-size:1.7rem;line-height:.5;cursor:pointer}up-modal[nesting="0"] up-modal-viewport{padding:25px 15px}up-modal[nesting="1"] up-modal-viewport{padding:50px 30px}up-modal[nesting="2"] up-modal-viewport{padding:75px 45px}up-modal[nesting="3"] up-modal-viewport{padding:100px 60px}up-modal[nesting="4"] up-modal-viewport{padding:125px 75px}up-modal[size=small] up-modal-box{width:350px}up-modal[size=medium] up-modal-box{width:650px}up-modal[size=large] up-modal-box{width:1000px}up-modal[size=grow] up-modal-box{width:auto}up-modal[size=full] up-modal-box{width:100%}up-drawer-viewport{justify-content:flex-start}up-drawer[position=right] 
up-drawer-viewport{justify-content:flex-end}up-drawer-box{min-height:100vh}up-drawer[size=small] up-drawer-box{width:150px}up-drawer[size=medium] up-drawer-box{width:340px}up-drawer[size=large] up-drawer-box{width:600px}up-drawer[size=grow] up-drawer-box{width:auto}up-drawer[size=full] up-drawer-box{width:100%}up-cover-box{width:100%;min-height:100vh;padding:0}up-popup{padding:15px;text-align:left}up-popup[size=small]{width:180px}up-popup[size=medium]{width:300px}up-popup[size=large]{width:550px}up-popup[size=grow] up-popup{width:auto}up-popup[size=full] up-popup{width:100%}
|
||||
[up-clickable][role=link]{cursor:pointer}[up-expand]:not([role]),[up-expand][role=link]{cursor:pointer}
|
1
_pkg2_dont_use/heroagent/web/static/favicon.ico
Normal file
1
_pkg2_dont_use/heroagent/web/static/favicon.ico
Normal file
@@ -0,0 +1 @@
|
||||
|
11
_pkg2_dont_use/heroagent/web/static/img/flower.svg
Normal file
11
_pkg2_dont_use/heroagent/web/static/img/flower.svg
Normal file
@@ -0,0 +1,11 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<svg width="64px" height="64px" viewBox="0 0 64 64" version="1.1" xmlns="http://www.w3.org/2000/svg">
|
||||
<title>Flower Icon</title>
|
||||
<g stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
|
||||
<circle fill="#4CAF50" cx="32" cy="32" r="8"></circle>
|
||||
<path d="M32,16 C36.418278,16 40,19.581722 40,24 C40,28.418278 36.418278,32 32,32 C27.581722,32 24,28.418278 24,24 C24,19.581722 27.581722,16 32,16 Z" fill="#8BC34A" transform="translate(32.000000, 24.000000) rotate(-45.000000) translate(-32.000000, -24.000000)"></path>
|
||||
<path d="M32,16 C36.418278,16 40,19.581722 40,24 C40,28.418278 36.418278,32 32,32 C27.581722,32 24,28.418278 24,24 C24,19.581722 27.581722,16 32,16 Z" fill="#CDDC39" transform="translate(32.000000, 24.000000) rotate(45.000000) translate(-32.000000, -24.000000)"></path>
|
||||
<path d="M32,32 C36.418278,32 40,35.581722 40,40 C40,44.418278 36.418278,48 32,48 C27.581722,48 24,44.418278 24,40 C24,35.581722 27.581722,32 32,32 Z" fill="#FF9800" transform="translate(32.000000, 40.000000) rotate(-45.000000) translate(-32.000000, -40.000000)"></path>
|
||||
<path d="M32,32 C36.418278,32 40,35.581722 40,40 C40,44.418278 36.418278,48 32,48 C27.581722,48 24,44.418278 24,40 C24,35.581722 27.581722,32 32,32 Z" fill="#FFC107" transform="translate(32.000000, 40.000000) rotate(45.000000) translate(-32.000000, -40.000000)"></path>
|
||||
</g>
|
||||
</svg>
|
After Width: | Height: | Size: 1.4 KiB |
23
_pkg2_dont_use/heroagent/web/static/img/hero-icon.svg
Normal file
23
_pkg2_dont_use/heroagent/web/static/img/hero-icon.svg
Normal file
@@ -0,0 +1,23 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<svg width="24px" height="24px" viewBox="0 0 24 24" version="1.1" xmlns="http://www.w3.org/2000/svg">
|
||||
<defs>
|
||||
<linearGradient id="heroGradient" x1="0%" y1="0%" x2="100%" y2="100%">
|
||||
<stop offset="0%" stop-color="#00A8FF" />
|
||||
<stop offset="100%" stop-color="#0077CC" />
|
||||
</linearGradient>
|
||||
<filter id="glow" x="-30%" y="-30%" width="160%" height="160%">
|
||||
<feGaussianBlur stdDeviation="1" result="blur" />
|
||||
<feComposite in="SourceGraphic" in2="blur" operator="over" />
|
||||
</filter>
|
||||
</defs>
|
||||
<g stroke="none" stroke-width="1" fill="none" fill-rule="evenodd" filter="url(#glow)">
|
||||
<!-- Hero mask/shield shape -->
|
||||
<path d="M12,2 L21,6 C21,13.5 18,19 12,22 C6,19 3,13.5 3,6 L12,2 Z" fill="url(#heroGradient)" />
|
||||
|
||||
<!-- Stylized H for Hero -->
|
||||
<path d="M8,7 L8,17 M16,7 L16,17 M8,12 L16,12" stroke="#FFFFFF" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" />
|
||||
|
||||
<!-- Small star/sparkle -->
|
||||
<circle cx="12" cy="5" r="1" fill="#FFFFFF" />
|
||||
</g>
|
||||
</svg>
|
After Width: | Height: | Size: 1.0 KiB |
19
_pkg2_dont_use/heroagent/web/static/img/hero-logo.svg
Normal file
19
_pkg2_dont_use/heroagent/web/static/img/hero-logo.svg
Normal file
@@ -0,0 +1,19 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<svg width="120px" height="30px" viewBox="0 0 120 30" version="1.1" xmlns="http://www.w3.org/2000/svg">
|
||||
<g stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
|
||||
<!-- Hero Icon -->
|
||||
<g transform="translate(5, 3)" fill="#00A8FF">
|
||||
<circle cx="12" cy="12" r="11" stroke="#00A8FF" stroke-width="2" fill="none"/>
|
||||
<rect x="11" y="4" width="2" height="6" rx="1"/>
|
||||
<rect x="6" y="8" width="2" height="6" rx="1"/>
|
||||
<rect x="16" y="8" width="2" height="6" rx="1"/>
|
||||
<rect x="11" y="14" width="2" height="6" rx="1"/>
|
||||
<rect x="8" y="11" width="8" height="2" rx="1"/>
|
||||
</g>
|
||||
|
||||
<!-- Text -->
|
||||
<text font-family="Arial, sans-serif" font-size="14" font-weight="bold" fill="#FFFFFF">
|
||||
<tspan x="30" y="19">HeroLauncher</tspan>
|
||||
</text>
|
||||
</g>
|
||||
</svg>
|
After Width: | Height: | Size: 844 B |
239
_pkg2_dont_use/heroagent/web/static/js/admin.js
Normal file
239
_pkg2_dont_use/heroagent/web/static/js/admin.js
Normal file
@@ -0,0 +1,239 @@
|
||||
// Admin Dashboard JavaScript - Documentation Style

// Entry point: wire up all page behaviour once the DOM is parsed.
document.addEventListener('DOMContentLoaded', function() {
    // Highlight active navigation links
    highlightActiveLinks();

    // Setup UI toggles
    setupUIToggles();

    // Setup search functionality
    setupSearch();
});
|
||||
|
||||
// Mark the top-nav and sidebar links that match the current URL as active.
function highlightActiveLinks() {
    const path = window.location.pathname;

    // Top navigation: a link is active on an exact match, or when the
    // current page lives under the link's section. The /admin root is
    // excluded from the prefix rule so it does not match every sub-page.
    for (const navLink of document.querySelectorAll('.nav-link')) {
        navLink.classList.remove('active');
        const target = navLink.getAttribute('href');
        const sectionMatch = target !== '/admin' && path.startsWith(target);
        if (path === target || sectionMatch) {
            navLink.classList.add('active');
        }
    }

    // Sidebar: only exact matches are highlighted; the enclosing
    // .sidebar-section (when present) is flagged as the active section.
    for (const docLink of document.querySelectorAll('.doc-link')) {
        docLink.classList.remove('active');
        if (docLink.getAttribute('href') !== path) {
            continue;
        }
        docLink.classList.add('active');
        const section = docLink.closest('.sidebar-section');
        if (section) {
            section.classList.add('active-section');
        }
    }
}
|
||||
|
||||
// Setup UI toggle functionality
|
||||
// Wire the click-to-toggle UI elements (mobile sidebar, log panel) and
// the collapsible sidebar menu.
function setupUIToggles() {
    // Helper: clicking the element at `triggerSel` toggles class `cls`
    // on the element at `targetSel`; no-op when either is missing.
    const wireToggle = (triggerSel, targetSel, cls) => {
        const trigger = document.querySelector(triggerSel);
        const target = document.querySelector(targetSel);
        if (trigger && target) {
            trigger.addEventListener('click', () => target.classList.toggle(cls));
        }
    };

    wireToggle('.menu-toggle', '.sidebar', 'open'); // sidebar on mobile
    wireToggle('.log-toggle', '.log-panel', 'open'); // log panel

    // Docusaurus-style collapsible menu.
    setupTreeviewMenu();
}
|
||||
|
||||
// Setup sidebar navigation
|
||||
// Initialize the sidebar tree: first mark the entry matching the current
// URL, then wire the collapsible section headings.
function setupTreeviewMenu() {
    setActiveSidebarLinks();
    setupCollapsibleSections();
}
|
||||
|
||||
// Set active sidebar links based on current URL
|
||||
// Flag the sidebar links that match the current URL and expand any
// collapsible section that contains a matching link.
function setActiveSidebarLinks() {
    const path = window.location.pathname;
    const links = document.querySelectorAll('.sidebar-link');

    // Reset previous state first.
    links.forEach(link => link.classList.remove('active'));

    links.forEach(link => {
        const href = link.getAttribute('href');

        // Exact match, or the link is a parent path of the current page
        // (the '/admin' root only matches exactly).
        const matches = path === href ||
            (href !== '/admin' && path.startsWith(href));
        if (!matches) return;

        link.classList.add('active');

        // Un-collapse the section this link lives in, if any.
        const section = link.closest('.sidebar-content-section')?.parentElement;
        if (section && section.classList.contains('collapsible')) {
            section.classList.remove('collapsed');
        }
    });
}
|
||||
|
||||
// Setup collapsible sections
|
||||
// Make sidebar sections collapsible: all start collapsed, a click on a
// toggle heading flips its section, and the section holding the active
// link is opened.
function setupCollapsibleSections() {
    // Collapse every collapsible section by default.
    document.querySelectorAll('.sidebar-section.collapsible').forEach(section => {
        section.classList.add('collapsed');
    });

    // Clicking a toggle heading flips its parent section.
    document.querySelectorAll('.sidebar-heading.toggle').forEach(heading => {
        heading.addEventListener('click', function () {
            this.parentElement.classList.toggle('collapsed');
        });
    });

    // Keep the section that contains the active link visible.
    const activeLink = document.querySelector('.sidebar-link.active');
    const activeSection = activeLink
        ? activeLink.closest('.sidebar-section.collapsible')
        : null;
    if (activeSection) {
        activeSection.classList.remove('collapsed');
    }
}
|
||||
|
||||
// Refresh processes data without page reload
|
||||
// Reload the processes table fragment in place, preferring Unpoly and
// falling back to a plain fetch + innerHTML swap.
function refreshProcesses() {
    const spinner = document.getElementById('refresh-loading');
    if (spinner) {
        spinner.style.display = 'inline';
    }

    // Shared cleanup for both code paths.
    const hideSpinner = () => {
        if (spinner) {
            spinner.style.display = 'none';
        }
    };

    const tableContent = document.querySelector('.processes-table-content');

    if (tableContent && window.up) {
        // Preferred path: let Unpoly swap the fragment.
        up.reload('.processes-table-content', {
            url: '/admin/system/processes-data',
            headers: {
                'X-Requested-With': 'XMLHttpRequest'
            }
        })
        .then(() => console.log('Process data refreshed successfully via Unpoly'))
        .catch(error => console.error('Error refreshing processes data:', error))
        .finally(hideSpinner);
    } else {
        // Fallback path when Unpoly is unavailable.
        fetch('/admin/system/processes-data', {
            method: 'GET',
            headers: {
                'Accept': 'text/html',
                'X-Requested-With': 'XMLHttpRequest'
            },
            cache: 'no-store'
        })
        .then(response => {
            if (!response.ok) {
                throw new Error('Network response was not ok: ' + response.status);
            }
            return response.text();
        })
        .then(html => {
            if (tableContent) {
                tableContent.innerHTML = html;
                console.log('Process data refreshed successfully via fetch');
            } else {
                console.error('Could not find processes table content element');
            }
        })
        .catch(error => console.error('Error refreshing processes data:', error))
        .finally(hideSpinner);
    }
}
|
||||
|
||||
// Note: Logging functionality has been moved to Unpoly-based implementation
|
||||
|
||||
// Setup search functionality
|
||||
// Wire the search box: pressing Enter runs a search on its value.
function setupSearch() {
    const input = document.querySelector('.search-box input');
    if (!input) return;

    input.addEventListener('keyup', function (e) {
        if (e.key === 'Enter') {
            performSearch(this.value);
        }
    });
}
|
||||
|
||||
// Perform search
|
||||
// Run a documentation search for `query`. Currently a stub that only
// logs the query; the redirect below shows the intended behavior.
function performSearch(query) {
    // Ignore empty or whitespace-only input.
    if (!query.trim()) return;

    // Log via the admin logger only when one is installed. Logging was
    // moved to an Unpoly-based implementation, so window.adminLog may be
    // undefined here; calling it unconditionally would throw a TypeError
    // and abort the handler.
    if (typeof window.adminLog === 'function') {
        window.adminLog(`Searching for: ${query}`, 'info');
    }

    // In a real application, this would send an AJAX request to search the docs
    // For now, just simulate a search by redirecting to a search results page
    // window.location.href = `/admin/search?q=${encodeURIComponent(query)}`;

    // For demo purposes, show a message in the console
    console.log(`Search query: ${query}`);
}
|
89
_pkg2_dont_use/heroagent/web/static/js/charts/cpu-chart.js
Normal file
89
_pkg2_dont_use/heroagent/web/static/js/charts/cpu-chart.js
Normal file
@@ -0,0 +1,89 @@
|
||||
// CPU chart initialization and update functions
|
||||
// CPU pie chart: initialization plus the global update hook used by the
// stats fetcher.
document.addEventListener('DOMContentLoaded', function () {
    // Shared dark background for charts.
    const chartBgColor = '#1e1e2f';

    const container = document.getElementById('cpu-chart');
    if (!container) return; // not on a page with the CPU chart

    const cpuChart = echarts.init(container, {renderer: 'canvas', useDirtyRect: false, backgroundColor: chartBgColor});

    const cpuOption = {
        tooltip: {
            trigger: 'item',
            // Show "series / name (PID: n) / CPU: x%" for the hovered slice.
            formatter: function (params) {
                const pid = params.data.pid || 'N/A';
                return params.seriesName + '<br/>' +
                    params.name + ' (PID: ' + pid + ')<br/>' +
                    'CPU: ' + Math.round(params.value) + '%';
            }
        },
        legend: {
            orient: 'vertical',
            left: 10,
            top: 'center',
            textStyle: {
                color: '#fff'
            },
            // Full process name, no truncation.
            formatter: function (name) {
                return name;
            },
            itemGap: 8,
            itemWidth: 15,
            padding: 10
        },
        series: [
            {
                name: 'Process CPU Usage',
                type: 'pie',
                radius: ['40%', '70%'],
                avoidLabelOverlap: true,
                itemStyle: {
                    borderRadius: 10,
                    borderColor: '#fff',
                    borderWidth: 2
                },
                label: {
                    show: false,
                    position: 'center'
                },
                emphasis: {
                    label: {
                        show: true,
                        fontSize: 16,
                        fontWeight: 'bold'
                    }
                },
                labelLine: {
                    show: false
                },
                data: [{ name: 'Loading...', value: 100 }]
            }
        ]
    };
    cpuChart.setOption(cpuOption);

    // Exposed globally so the stats fetcher can push fresh data.
    window.updateCpuChart = function (processes) {
        // Only the first five entries are charted.
        const slices = processes.slice(0, 5).map(p => ({
            name: p.name,       // full process name
            value: p.cpu_percent,
            pid: p.pid          // kept for the tooltip
        }));

        cpuOption.series[0].data = slices;
        cpuChart.setOption(cpuOption);
    };

    // Keep the chart sized to its container.
    window.addEventListener('resize', function () {
        cpuChart && cpuChart.resize();
    });
});
|
@@ -0,0 +1,96 @@
|
||||
// Memory chart initialization and update functions
|
||||
// Memory pie chart: initialization plus the global update hook used by
// the stats fetcher.
document.addEventListener('DOMContentLoaded', function () {
    // Shared dark background for charts.
    const chartBgColor = '#1e1e2f';

    const container = document.getElementById('memory-chart');
    if (!container) return; // not on a page with the memory chart

    const memoryChart = echarts.init(container, {renderer: 'canvas', useDirtyRect: false, backgroundColor: chartBgColor});

    const memoryOption = {
        tooltip: {
            trigger: 'item',
            // Show "series / name (PID: n) / Memory: x MB" for the slice.
            formatter: function (params) {
                const pid = params.data.pid || 'N/A';
                return params.seriesName + '<br/>' +
                    params.name + ' (PID: ' + pid + ')<br/>' +
                    'Memory: ' + Math.round(params.value) + ' MB';
            },
            textStyle: {
                fontSize: 14
            }
        },
        legend: {
            orient: 'vertical',
            left: 10,
            top: 'center',
            textStyle: {
                color: '#fff'
            },
            // Full process name, no truncation.
            formatter: function (name) {
                return name;
            },
            itemGap: 12, // Increased gap for better readability
            itemWidth: 15,
            padding: 10
        },
        series: [
            {
                name: 'Process Memory Usage',
                type: 'pie',
                radius: ['40%', '70%'],
                avoidLabelOverlap: true,
                itemStyle: {
                    borderRadius: 10,
                    borderColor: '#fff',
                    borderWidth: 2
                },
                label: {
                    show: false,
                    position: 'center'
                },
                emphasis: {
                    label: {
                        show: true,
                        fontSize: 16,
                        fontWeight: 'bold'
                    }
                },
                labelLine: {
                    show: false
                },
                data: [{ name: 'Loading...', value: 100 }]
            }
        ]
    };
    memoryChart.setOption(memoryOption);

    // Exposed globally so the stats fetcher can push fresh data.
    window.updateMemoryChart = function (processes) {
        // Top five by memory, descending. slice() copies first so the
        // caller's array order is untouched.
        const top = processes
            .slice()
            .sort((a, b) => b.memory_mb - a.memory_mb)
            .slice(0, 5);

        memoryOption.series[0].data = top.map(p => ({
            name: p.name,      // full process name
            value: p.memory_mb,
            pid: p.pid         // kept for the tooltip
        }));
        memoryChart.setOption(memoryOption);
    };

    // Keep the chart sized to its container.
    window.addEventListener('resize', function () {
        memoryChart && memoryChart.resize();
    });
});
|
116
_pkg2_dont_use/heroagent/web/static/js/charts/network-chart.js
Normal file
116
_pkg2_dont_use/heroagent/web/static/js/charts/network-chart.js
Normal file
@@ -0,0 +1,116 @@
|
||||
// Network chart initialization and update functions
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
// Background color for charts
|
||||
var chartBgColor = '#1e1e2f';
|
||||
|
||||
// Initialize network chart
|
||||
var networkChartDom = document.getElementById('network-chart');
|
||||
if (!networkChartDom) return;
|
||||
|
||||
var networkChart = echarts.init(networkChartDom, {renderer: 'canvas', useDirtyRect: false, backgroundColor: chartBgColor});
|
||||
var networkOption = {
|
||||
title: {
|
||||
text: 'Network Traffic',
|
||||
left: 'center',
|
||||
textStyle: {
|
||||
color: '#fff'
|
||||
}
|
||||
},
|
||||
tooltip: {
|
||||
trigger: 'axis'
|
||||
},
|
||||
legend: {
|
||||
data: ['Upload', 'Download'],
|
||||
textStyle: {
|
||||
color: '#fff'
|
||||
},
|
||||
bottom: 10
|
||||
},
|
||||
xAxis: {
|
||||
type: 'category',
|
||||
data: [],
|
||||
axisLabel: {
|
||||
color: '#fff'
|
||||
}
|
||||
},
|
||||
yAxis: {
|
||||
type: 'value',
|
||||
axisLabel: {
|
||||
color: '#fff',
|
||||
formatter: '{value} KB/s'
|
||||
}
|
||||
},
|
||||
series: [
|
||||
{
|
||||
name: 'Upload',
|
||||
type: 'line',
|
||||
data: []
|
||||
},
|
||||
{
|
||||
name: 'Download',
|
||||
type: 'line',
|
||||
data: []
|
||||
}
|
||||
]
|
||||
};
|
||||
networkChart.setOption(networkOption);
|
||||
|
||||
// Data for network chart
|
||||
var timestamps = [];
|
||||
var uploadData = [];
|
||||
var downloadData = [];
|
||||
|
||||
// Function to update network chart
|
||||
window.updateNetworkChart = function(upSpeed, downSpeed) {
|
||||
// Convert speeds to KB/s for consistent units
|
||||
var upKBps = convertToKBps(upSpeed);
|
||||
var downKBps = convertToKBps(downSpeed);
|
||||
|
||||
// Add current timestamp
|
||||
var now = new Date();
|
||||
var timeString = now.getHours() + ':' +
|
||||
(now.getMinutes() < 10 ? '0' + now.getMinutes() : now.getMinutes()) + ':' +
|
||||
(now.getSeconds() < 10 ? '0' + now.getSeconds() : now.getSeconds());
|
||||
|
||||
// Update data arrays
|
||||
timestamps.push(timeString);
|
||||
uploadData.push(upKBps);
|
||||
downloadData.push(downKBps);
|
||||
|
||||
// Keep only the last 10 data points
|
||||
if (timestamps.length > 10) {
|
||||
timestamps.shift();
|
||||
uploadData.shift();
|
||||
downloadData.shift();
|
||||
}
|
||||
|
||||
// Update chart option
|
||||
networkOption.xAxis.data = timestamps;
|
||||
networkOption.series[0].data = uploadData;
|
||||
networkOption.series[1].data = downloadData;
|
||||
|
||||
// Apply updated option
|
||||
networkChart.setOption(networkOption);
|
||||
};
|
||||
|
||||
// Helper function to convert network speeds to KB/s
|
||||
function convertToKBps(speedString) {
|
||||
var value = parseFloat(speedString);
|
||||
var unit = speedString.replace(/[\d.]/g, '');
|
||||
|
||||
if (unit === 'Mbps') {
|
||||
return value * 125; // 1 Mbps = 125 KB/s
|
||||
} else if (unit === 'Kbps') {
|
||||
return value / 8; // 1 Kbps = 0.125 KB/s
|
||||
} else if (unit === 'Gbps') {
|
||||
return value * 125000; // 1 Gbps = 125000 KB/s
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
// Handle window resize
|
||||
window.addEventListener('resize', function() {
|
||||
networkChart && networkChart.resize();
|
||||
});
|
||||
});
|
@@ -0,0 +1,88 @@
|
||||
// Data fetching functions for system stats
|
||||
// Periodic stats polling: feeds the CPU, memory and network charts from
// the JSON API endpoints, and also picks up Unpoly fragment reloads.
document.addEventListener('DOMContentLoaded', function () {
    // Pull hardware stats and forward network speeds to the chart.
    function fetchHardwareStats() {
        fetch('/api/hardware-stats')
            .then(response => {
                if (!response.ok) {
                    throw new Error('Network response was not ok');
                }
                return response.json();
            })
            .then(data => {
                const net = data.network || {};
                const upSpeed = net.upload_speed ? net.upload_speed : '0Mbps';
                const downSpeed = net.download_speed ? net.download_speed : '0Mbps';

                if (window.updateNetworkChart) {
                    window.updateNetworkChart(upSpeed, downSpeed);
                }
            })
            .catch(error => console.error('Error fetching hardware stats:', error));
    }

    // Pull per-process stats and forward them to the pie charts.
    function fetchProcessStats() {
        fetch('/api/process-stats')
            .then(response => {
                if (!response.ok) {
                    throw new Error('Network response was not ok');
                }
                return response.json();
            })
            .then(data => {
                if (data.processes) {
                    if (window.updateCpuChart) {
                        window.updateCpuChart(data.processes);
                    }
                    if (window.updateMemoryChart) {
                        window.updateMemoryChart(data.processes);
                    }
                }
            })
            .catch(error => console.error('Error fetching process stats:', error));
    }

    // Fetch everything, then reschedule itself every 2 seconds
    // (requestAnimationFrame keeps updates aligned with rendering).
    function fetchAllStats() {
        fetchHardwareStats();
        fetchProcessStats();

        requestAnimationFrame(function () {
            setTimeout(fetchAllStats, 2000);
        });
    }

    // Only poll on pages that actually show one of the charts.
    const hasChart = ['cpu-chart', 'memory-chart', 'network-chart']
        .some(id => document.getElementById(id));
    if (hasChart) {
        fetchAllStats();
    }

    // When Unpoly reloads the hardware-stats fragment, scrape the network
    // speeds out of the table and push them to the chart.
    document.addEventListener('up:fragment:loaded', function (event) {
        if (!(event.target && event.target.classList.contains('hardware-stats'))) return;

        const networkCell = event.target.querySelector('tr:nth-child(4) td');
        if (!networkCell) return;

        const networkText = networkCell.textContent;
        const upMatch = networkText.match(/Up: ([\d.]+Mbps)/);
        const downMatch = networkText.match(/Down: ([\d.]+Mbps)/);

        if (window.updateNetworkChart) {
            window.updateNetworkChart(
                upMatch ? upMatch[1] : '0Mbps',
                downMatch ? downMatch[1] : '0Mbps'
            );
        }
    });
});
|
45
_pkg2_dont_use/heroagent/web/static/js/echarts/echarts.min.js
vendored
Normal file
45
_pkg2_dont_use/heroagent/web/static/js/echarts/echarts.min.js
vendored
Normal file
File diff suppressed because one or more lines are too long
305
_pkg2_dont_use/heroagent/web/static/js/service-logs.js
Normal file
305
_pkg2_dont_use/heroagent/web/static/js/service-logs.js
Normal file
@@ -0,0 +1,305 @@
|
||||
// Variables for logs functionality
|
||||
// Shared state for the service-logs modal. These names are referenced by
// every function below, so they keep their original identifiers.
let currentServiceName = '';    // service whose logs are currently shown
let autoRefreshEnabled = false; // whether periodic refresh is active
let autoRefreshInterval = null; // setInterval handle, null when off
const AUTO_REFRESH_RATE = 3000; // 3 seconds
|
||||
|
||||
// Function to show process logs
|
||||
// Open the logs modal for service `name`, creating the modal lazily on
// first use, and kick off the initial log fetch.
function showProcessLogs(name) {
    currentServiceName = name;

    let modal = document.getElementById('logs-modal');
    if (!modal) {
        modal = createLogsModal(); // first open: build the DOM
    }

    document.getElementById('logs-modal-title').textContent = `Service Logs: ${name}`;
    modal.style.display = 'block';
    fetchProcessLogs(name);
}
|
||||
|
||||
// Function to create the logs modal
|
||||
// Build the logs modal DOM (markup + injected stylesheet), append it to
// the document and return the modal element. Called once, lazily.
function createLogsModal() {
    // Modal container, hidden until showProcessLogs() opens it.
    const modal = document.createElement('div');
    modal.id = 'logs-modal';
    modal.className = 'modal';
    modal.style.display = 'none';
    modal.innerHTML = `
        <div class="modal-background" onclick="closeLogsModal()"></div>
        <div class="modal-content">
            <div class="modal-header">
                <h3 id="logs-modal-title">Service Logs</h3>
                <span class="close" onclick="closeLogsModal()">&times;</span>
            </div>
            <div class="modal-body">
                <pre id="logs-content">Loading logs...</pre>
            </div>
            <div class="modal-footer">
                <label class="auto-refresh-toggle">
                    <input type="checkbox" id="auto-refresh-checkbox" onchange="toggleAutoRefresh()">
                    <span>Auto-refresh</span>
                </label>
                <button class="button secondary" onclick="closeLogsModal()">Close</button>
                <button class="button primary" onclick="refreshLogs()">Refresh</button>
            </div>
        </div>
    `;
    document.body.appendChild(modal);

    // One-off stylesheet for the modal chrome.
    const styleEl = document.createElement('style');
    styleEl.textContent = `
        .modal {
            display: none;
            position: fixed;
            z-index: 1000;
            left: 0;
            top: 0;
            width: 100%;
            height: 100%;
            overflow: auto;
            background-color: rgba(0,0,0,0.4);
        }

        .modal-content {
            background-color: #fefefe;
            margin: 10% auto;
            padding: 0;
            border: 1px solid #888;
            width: 80%;
            max-width: 800px;
            box-shadow: 0 4px 8px 0 rgba(0,0,0,0.2);
            border-radius: 4px;
        }

        .modal-header {
            padding: 10px 15px;
            background-color: #f8f9fa;
            border-bottom: 1px solid #dee2e6;
            display: flex;
            justify-content: space-between;
            align-items: center;
        }

        .modal-header h3 {
            margin: 0;
        }

        .close {
            color: #aaa;
            font-size: 28px;
            font-weight: bold;
            cursor: pointer;
        }

        .close:hover,
        .close:focus {
            color: black;
            text-decoration: none;
        }

        .modal-body {
            padding: 15px;
            max-height: 500px;
            overflow-y: auto;
        }

        .modal-body pre {
            white-space: pre-wrap;
            word-wrap: break-word;
            background-color: #f8f9fa;
            padding: 10px;
            border-radius: 4px;
            border: 1px solid #dee2e6;
            font-family: monospace;
            margin: 0;
            height: 400px;
            overflow-y: auto;
        }

        .modal-footer {
            padding: 10px 15px;
            background-color: #f8f9fa;
            border-top: 1px solid #dee2e6;
            display: flex;
            justify-content: flex-end;
            gap: 10px;
        }

        .auto-refresh-toggle {
            display: flex;
            align-items: center;
            margin-right: auto;
            cursor: pointer;
        }

        .auto-refresh-toggle input {
            margin-right: 5px;
        }
    `;
    document.head.appendChild(styleEl);

    return modal;
}
|
||||
|
||||
// Function to close the logs modal
|
||||
// Hide the logs modal and stop any log polling tied to it.
function closeLogsModal() {
    const modal = document.getElementById('logs-modal');
    if (modal) {
        modal.style.display = 'none';
    }

    // No modal visible -> no reason to keep refreshing.
    disableAutoRefresh();
    currentServiceName = '';
}
|
||||
|
||||
// Function to fetch process logs
|
||||
// Fetch the last `lines` log lines for service `name` and render them
// into the modal. stderr lines are colored red; during auto-refresh the
// user's scroll position is preserved unless they were at the bottom.
function fetchProcessLogs(name, lines = 10000) {
    const formData = new FormData();
    formData.append('name', name);
    formData.append('lines', lines);

    const logsContent = document.getElementById('logs-content');
    if (!logsContent) return;

    // Capture scroll state so an auto-refresh doesn't yank the view;
    // "at bottom" gets 5px of tolerance.
    const isAutoRefresh = autoRefreshEnabled;
    const scrollTop = isAutoRefresh ? logsContent.scrollTop : 0;
    const scrollHeight = isAutoRefresh ? logsContent.scrollHeight : 0;
    const clientHeight = isAutoRefresh ? logsContent.clientHeight : 0;
    const wasScrolledToBottom = scrollHeight - scrollTop <= clientHeight + 5; // 5px tolerance

    // Loading indicator only on the first (manual) load, not during
    // auto-refresh, to avoid flicker.
    if (!isAutoRefresh) {
        logsContent.textContent = 'Loading logs...';
    }

    fetch('/admin/services/logs', {
        method: 'POST',
        body: formData
    })
    .then(response => response.json())
    .then(data => {
        if (data.error) {
            logsContent.textContent = `Error: ${data.error}`;
            return;
        }

        // Strip the **RESULT** / **ENDRESULT** framing markers.
        let cleanedLogs = data.logs || 'No logs available';
        cleanedLogs = cleanedLogs.replace(/\*\*RESULT\*\*/g, '');
        cleanedLogs = cleanedLogs.replace(/\*\*ENDRESULT\*\*/g, '');
        cleanedLogs = cleanedLogs.trim();

        if (cleanedLogs.length === 0) {
            logsContent.textContent = 'No logs available';
            return;
        }

        // Rebuild the log view line by line.
        logsContent.textContent = '';

        // Renamed from `lines` — the original shadowed the function
        // parameter of the same name.
        const logLines = cleanedLogs.split('\n');
        logLines.forEach(line => {
            const logLine = document.createElement('div');

            // stderr lines look like "YYYY-MM-DD HH:MM:SS E ...".
            if (line.match(/^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} E /)) {
                logLine.className = 'stderr-log';
                logLine.style.color = '#ff3333'; // red for stderr
            }

            logLine.textContent = line;
            logsContent.appendChild(logLine);
        });

        // Keep monospace / wrapped formatting for the rebuilt content.
        logsContent.style.fontFamily = 'monospace';
        logsContent.style.whiteSpace = 'pre-wrap';

        if (!isAutoRefresh || wasScrolledToBottom) {
            // First load, or the user was following the tail: jump to end.
            logsContent.scrollTop = logsContent.scrollHeight;
        } else {
            // Auto-refresh mid-scroll: stay where the user was.
            logsContent.scrollTop = scrollTop;
        }
    })
    .catch(error => {
        logsContent.textContent = `Error loading logs: ${error.message}`;
    });
}
|
||||
|
||||
// Function to refresh logs for the current service
|
||||
// Re-fetch logs for the currently displayed service, if any.
function refreshLogs() {
    if (currentServiceName) {
        fetchProcessLogs(currentServiceName);
    }
}
|
||||
|
||||
// Function to toggle auto-refresh
|
||||
// Sync auto-refresh state with the modal's checkbox.
function toggleAutoRefresh() {
    const checkbox = document.getElementById('auto-refresh-checkbox');
    if (checkbox && checkbox.checked) {
        enableAutoRefresh();
    } else {
        disableAutoRefresh();
    }
}
|
||||
|
||||
// Function to enable auto-refresh
|
||||
// Start polling logs for the current service every AUTO_REFRESH_RATE ms.
function enableAutoRefresh() {
    // Replace any existing timer rather than stacking intervals.
    if (autoRefreshInterval) {
        clearInterval(autoRefreshInterval);
    }

    autoRefreshEnabled = true;

    autoRefreshInterval = setInterval(() => {
        if (currentServiceName) {
            fetchProcessLogs(currentServiceName);
        }
    }, AUTO_REFRESH_RATE);

    console.log('Auto-refresh enabled with interval:', AUTO_REFRESH_RATE, 'ms');
}
|
||||
|
||||
// Function to disable auto-refresh
|
||||
// Stop log polling and reset the checkbox so the UI matches the state.
function disableAutoRefresh() {
    autoRefreshEnabled = false;

    if (autoRefreshInterval) {
        clearInterval(autoRefreshInterval);
        autoRefreshInterval = null;
    }

    // Keep the checkbox in sync when disabling programmatically
    // (e.g. when the modal closes).
    const checkbox = document.getElementById('auto-refresh-checkbox');
    if (checkbox) {
        checkbox.checked = false;
    }

    console.log('Auto-refresh disabled');
}
|
||||
|
||||
// Close modal when clicking outside of it
|
||||
// Clicking the modal backdrop (the modal element itself, not its
// content) closes the dialog.
window.addEventListener('click', function (event) {
    const modal = document.getElementById('logs-modal');
    if (modal && event.target === modal) {
        closeLogsModal();
    }
});
|
||||
|
||||
// Allow ESC key to close the modal
|
||||
// ESC closes the logs modal from anywhere on the page.
document.addEventListener('keydown', function (event) {
    if (event.key === 'Escape') {
        closeLogsModal();
    }
});
|
260
_pkg2_dont_use/heroagent/web/static/js/services.js
Normal file
260
_pkg2_dont_use/heroagent/web/static/js/services.js
Normal file
@@ -0,0 +1,260 @@
|
||||
// Function to refresh services
|
||||
// Reload the services table from the server and swap its HTML in place.
// On failure an inline error row is shown and a retry is scheduled.
function refreshServices() {
    const servicesTable = document.getElementById('services-table');
    if (!servicesTable) {
        // Guard: on pages without the table the original code threw a
        // TypeError when assigning innerHTML to null.
        console.error('refreshServices: services-table element not found');
        return;
    }

    fetch('/admin/services/data')
        .then(response => {
            if (!response.ok) {
                // Server sends JSON errors; surface its message when present.
                return response.json().then(err => {
                    throw new Error(err.error || 'Failed to refresh services');
                });
            }
            return response.text();
        })
        .then(html => {
            servicesTable.innerHTML = html;
        })
        .catch(error => {
            console.error('Error refreshing services:', error);
            // Show error message in the services table instead of replacing it
            const errorHtml = `<table><tbody><tr><td colspan="4"><div class="alert alert-danger">Error refreshing services: ${error.message}</div></td></tr></tbody></table>`;
            servicesTable.innerHTML = errorHtml;
            // Try again after a short delay
            setTimeout(() => {
                refreshServices();
            }, 3000);
        });
}
|
||||
|
||||
// Refresh services as soon as the page loads
|
||||
// Populate the services table as soon as the page is ready.
document.addEventListener('DOMContentLoaded', () => refreshServices());
|
||||
|
||||
// Function to start a new service
|
||||
// Submit the "start service" form via fetch and show the outcome in the
// result banner for five seconds.
function startService(event) {
    event.preventDefault();

    const form = document.getElementById('start-service-form');
    const resultDiv = document.getElementById('start-result');

    fetch('/admin/services/start', {
        method: 'POST',
        body: new FormData(form)
    })
    .then(response => response.json())
    .then(data => {
        if (data.error) {
            resultDiv.className = 'alert alert-danger';
            resultDiv.textContent = data.error;
        } else {
            resultDiv.className = 'alert alert-success';
            resultDiv.textContent = data.message;
            form.reset();
            refreshServices(); // reflect the new service in the table
        }

        // Banner is visible for five seconds in both cases.
        resultDiv.style.display = 'block';
        setTimeout(() => {
            resultDiv.style.display = 'none';
        }, 5000);
    })
    .catch(error => {
        resultDiv.className = 'alert alert-danger';
        resultDiv.textContent = 'An error occurred: ' + error.message;
        resultDiv.style.display = 'block';
    });
}
|
||||
|
||||
// Function to stop a process
|
||||
// Stop the named service after user confirmation, then refresh the table.
function stopProcess(name) {
    if (!confirm('Are you sure you want to stop this service?')) return;

    const payload = new FormData();
    payload.append('name', name);

    fetch('/admin/services/stop', {
        method: 'POST',
        body: payload
    })
    .then(response => response.json())
    .then(data => {
        if (data.error) {
            alert('Error: ' + data.error);
        } else {
            refreshServices();
        }
    })
    .catch(error => {
        alert('An error occurred: ' + error.message);
    });
}
|
||||
|
||||
// Function to restart a process
|
||||
function restartProcess(name) {
|
||||
if (!confirm('Are you sure you want to restart this service?')) return;
|
||||
|
||||
const formData = new FormData();
|
||||
formData.append('name', name);
|
||||
|
||||
fetch('/admin/services/restart', {
|
||||
method: 'POST',
|
||||
body: formData
|
||||
})
|
||||
.then(response => response.json())
|
||||
.then(data => {
|
||||
if (data.error) {
|
||||
alert('Error: ' + data.error);
|
||||
} else {
|
||||
refreshServices();
|
||||
}
|
||||
})
|
||||
.catch(error => {
|
||||
alert('An error occurred: ' + error.message);
|
||||
});
|
||||
}
|
||||
|
||||
// Function to delete a process
|
||||
function deleteProcess(name) {
|
||||
if (!confirm('Are you sure you want to delete this service? This cannot be undone.')) return;
|
||||
|
||||
const formData = new FormData();
|
||||
formData.append('name', name);
|
||||
|
||||
fetch('/admin/services/delete', {
|
||||
method: 'POST',
|
||||
body: formData
|
||||
})
|
||||
.then(response => response.json())
|
||||
.then(data => {
|
||||
if (data.error) {
|
||||
alert('Error: ' + data.error);
|
||||
} else {
|
||||
refreshServices();
|
||||
}
|
||||
})
|
||||
.catch(error => {
|
||||
alert('An error occurred: ' + error.message);
|
||||
});
|
||||
}
|
||||
|
||||
// Function to show process logs.
//
// Builds a modal dialog on the fly, injects the shared modal CSS once,
// loads the logs for the named service, and wires up close handlers.
function showProcessLogs(name) {
    const modal = document.createElement('div');
    modal.className = 'modal';
    modal.innerHTML = `
        <div class="modal-content">
            <div class="modal-header">
                <h2>Logs for ${name}</h2>
                <span class="close">×</span>
            </div>
            <div class="modal-body">
                <pre id="log-content" style="height: 400px; overflow-y: auto; background: #f5f5f5; padding: 10px;">Loading logs...</pre>
            </div>
            <div class="modal-footer">
                <button class="button refresh" onclick="refreshLogs('${name}')">Refresh Logs</button>
                <button class="button secondary" onclick="closeModal()">Close</button>
            </div>
        </div>
    `;
    document.body.appendChild(modal);

    injectModalStylesOnce();

    // Close modal when clicking the X.
    modal.querySelector('.close').onclick = closeModal;

    // Load the logs.
    loadLogs(name);

    // Close modal when clicking outside the dialog. This deliberately
    // assigns window.onclick (rather than addEventListener) so that
    // closeModal() can clear it again by setting it to null.
    window.onclick = function(event) {
        if (event.target === modal) {
            closeModal();
        }
    };
}

// Inject the shared modal stylesheet into <head> exactly once; later
// calls are no-ops because the <style> element keeps a fixed id.
function injectModalStylesOnce() {
    if (document.getElementById('modal-styles')) {
        return;
    }
    const style = document.createElement('style');
    style.id = 'modal-styles';
    style.innerHTML = `
        .modal {
            display: block;
            position: fixed;
            z-index: 1000;
            left: 0;
            top: 0;
            width: 100%;
            height: 100%;
            background-color: rgba(0,0,0,0.4);
        }
        .modal-content {
            background-color: #fefefe;
            margin: 5% auto;
            padding: 20px;
            border: 1px solid #888;
            width: 80%;
            max-width: 800px;
            border-radius: 5px;
            box-shadow: 0 4px 8px rgba(0,0,0,0.1);
        }
        .modal-header {
            display: flex;
            justify-content: space-between;
            align-items: center;
            border-bottom: 1px solid #eee;
            padding-bottom: 10px;
            margin-bottom: 15px;
        }
        .modal-footer {
            border-top: 1px solid #eee;
            padding-top: 15px;
            margin-top: 15px;
            text-align: right;
        }
        .close {
            color: #aaa;
            font-size: 28px;
            font-weight: bold;
            cursor: pointer;
        }
        .close:hover {
            color: black;
        }
    `;
    document.head.appendChild(style);
}
|
||||
|
||||
// Fetch the most recent log lines for a service and render them into
// the modal's <pre id="log-content"> element.
function loadLogs(name) {
    const url = `/admin/services/logs?name=${encodeURIComponent(name)}&lines=100`;
    fetch(url)
        .then((response) => response.json())
        .then((data) => {
            const logContent = document.getElementById('log-content');
            if (data.error) {
                logContent.textContent = `Error: ${data.error}`;
                return;
            }
            logContent.textContent = data.logs || 'No logs available';
            // Keep the view pinned to the newest entries.
            logContent.scrollTop = logContent.scrollHeight;
        })
        .catch((error) => {
            document.getElementById('log-content').textContent = `Error loading logs: ${error.message}`;
        });
}
|
||||
|
||||
// Show a placeholder in the log panel, then re-fetch the logs.
function refreshLogs(name) {
    const logContent = document.getElementById('log-content');
    logContent.textContent = 'Refreshing logs...';
    loadLogs(name);
}
|
||||
|
||||
// Tear down the log modal (if present) and drop the window-level
// click handler installed by showProcessLogs().
function closeModal() {
    const modal = document.querySelector('.modal');
    if (modal) {
        document.body.removeChild(modal);
    }
    window.onclick = null;
}
|
1
_pkg2_dont_use/heroagent/web/static/js/unpoly.min.js
vendored
Normal file
1
_pkg2_dont_use/heroagent/web/static/js/unpoly.min.js
vendored
Normal file
File diff suppressed because one or more lines are too long
77
_pkg2_dont_use/heroagent/web/templates/admin/index.jet
Normal file
77
_pkg2_dont_use/heroagent/web/templates/admin/index.jet
Normal file
@@ -0,0 +1,77 @@
|
||||
{{ extends "layout" }}
|
||||
{{ block documentBody() }}
|
||||
<article>
|
||||
<header>
|
||||
<h2>Dashboard</h2>
|
||||
<p>Welcome to the HeroLauncher Admin Dashboard</p>
|
||||
</header>
|
||||
|
||||
<div class="grid">
|
||||
<div>
|
||||
<article>
|
||||
<header>
|
||||
<h3>System Status</h3>
|
||||
</header>
|
||||
|
||||
<div class="grid">
|
||||
<div>
|
||||
<h4>Services</h4>
|
||||
<p>
|
||||
<strong>12</strong> running
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<h4>CPU</h4>
|
||||
<p>
|
||||
<strong>24%</strong> usage
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<h4>Memory</h4>
|
||||
<p>
|
||||
<strong>1.2GB</strong> / 8GB
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</article>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<article>
|
||||
<header>
|
||||
<h3>Recent Activity</h3>
|
||||
</header>
|
||||
|
||||
<ul>
|
||||
<li>Service 'redis' started (2 minutes ago)</li>
|
||||
<li>Package 'web-ui' updated (10 minutes ago)</li>
|
||||
<li>System backup completed (1 hour ago)</li>
|
||||
<li>User 'admin' logged in (2 hours ago)</li>
|
||||
</ul>
|
||||
</article>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<article>
|
||||
<header>
|
||||
<h3>Quick Actions</h3>
|
||||
</header>
|
||||
|
||||
<div class="grid">
|
||||
<div>
|
||||
<a href="/admin/services/start" role="button">Start Service</a>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<a href="/admin/services/stop" role="button" class="secondary">Stop Service</a>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<a href="/admin/packages/install" role="button" class="contrast">Install Package</a>
|
||||
</div>
|
||||
</div>
|
||||
</article>
|
||||
</article>
|
||||
{{ end }}
|
56
_pkg2_dont_use/heroagent/web/templates/admin/jobs.jet
Normal file
56
_pkg2_dont_use/heroagent/web/templates/admin/jobs.jet
Normal file
@@ -0,0 +1,56 @@
|
||||
{{ extends "./layout" }}
|
||||
|
||||
{{ block documentBody() }}
|
||||
<div class="main-content">
|
||||
<header class="action-header">
|
||||
<div>
|
||||
<h2>Jobs</h2>
|
||||
<p>Manage all your scheduled jobs</p>
|
||||
</div>
|
||||
<div>
|
||||
<a href="/admin/jobs/new" class="button">Add New Job</a>
|
||||
</div>
|
||||
</header>
|
||||
|
||||
{{if len(warning) > 0}}
|
||||
<div class="alert alert-warning">
|
||||
{{warning}}
|
||||
</div>
|
||||
{{end}}
|
||||
|
||||
{{if len(error) > 0}}
|
||||
<div class="alert alert-error">
|
||||
{{error}}
|
||||
</div>
|
||||
{{end}}
|
||||
|
||||
<section>
|
||||
<div class="card">
|
||||
<div class="card-title">Filter Jobs</div>
|
||||
<div class="card-content">
|
||||
<form action="/admin/jobs/list" up-target="#jobs-list">
|
||||
<div class="form-group">
|
||||
<label for="circleid">Circle ID</label>
|
||||
<input id="circleid" type="text" name="circleid" placeholder="Enter circle ID">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label for="topic">Topic</label>
|
||||
<input id="topic" type="text" name="topic" placeholder="Enter topic">
|
||||
</div>
|
||||
<div class="form-actions">
|
||||
<button class="button" type="submit">Filter Jobs</button>
|
||||
<a href="/admin/jobs/list" class="button" up-target="#jobs-list">Refresh</a>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id="jobs-list">
|
||||
<!-- This will be populated by the server response -->
|
||||
<div up-hungry>
|
||||
<a href="/admin/jobs/list" up-target="#jobs-list" up-preload up-eager></a>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
</div>
|
||||
{{ end }}
|
@@ -0,0 +1,44 @@
|
||||
<div class="card">
|
||||
<div class="card-title">Jobs List</div>
|
||||
|
||||
{{if len(error) > 0}}
|
||||
<div class="alert alert-error">
|
||||
{{error}}
|
||||
</div>
|
||||
{{end}}
|
||||
|
||||
<div class="card-content">
|
||||
<table class="table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Job ID</th>
|
||||
<th>Circle ID</th>
|
||||
<th>Topic</th>
|
||||
<th>Status</th>
|
||||
<th>Actions</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{if len(jobs) == 0}}
|
||||
<tr>
|
||||
<td colspan="5" class="text-center">No jobs found</td>
|
||||
</tr>
|
||||
{{else}}
|
||||
{{range job := jobs}}
|
||||
<tr>
|
||||
<td>{{job.JobID}}</td>
|
||||
<td>{{job.CircleID}}</td>
|
||||
<td>{{job.Topic}}</td>
|
||||
<td>
|
||||
<span class="status-badge status-{{job.Status}}">{{job.Status}}</span>
|
||||
</td>
|
||||
<td>
|
||||
<a href="/admin/jobs/get/{{job.JobID}}" class="button button-small" up-target=".main-content">View</a>
|
||||
</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
37
_pkg2_dont_use/heroagent/web/templates/admin/layout.jet
Normal file
37
_pkg2_dont_use/heroagent/web/templates/admin/layout.jet
Normal file
@@ -0,0 +1,37 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>HeroLauncher Admin</title>
|
||||
<link rel="icon" href="/img/hero-icon.svg" type="image/svg+xml">
|
||||
<link rel="shortcut icon" href="/favicon.ico">
|
||||
<link rel="stylesheet" href="/css/pico.min.css">
|
||||
<link rel="stylesheet" href="/css/admin.css">
|
||||
<link rel="stylesheet" href="/css/unpoly.min.css">
|
||||
<link rel="stylesheet" href="/css/logs.css">
|
||||
<link rel="stylesheet" href="/css/jobs.css">
|
||||
<style>
|
||||
:root {
|
||||
--font-size: 70%; /* Reduce font size by 30% */
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
{{ include "partials/header" }}
|
||||
<div class="sidebar">
|
||||
<nav>
|
||||
{{ include "partials/sidebar" }}
|
||||
</nav>
|
||||
</div>
|
||||
<main>
|
||||
{{block documentBody()}}{{end}}
|
||||
</main>
|
||||
|
||||
<script src="/js/unpoly.min.js"></script>
|
||||
<script src="/js/echarts/echarts.min.js"></script>
|
||||
<script src="/js/admin.js"></script>
|
||||
{{block scripts()}}{{end}}
|
||||
|
||||
</body>
|
||||
</html>
|
@@ -0,0 +1,86 @@
|
||||
{{ extends "../layout" }}
|
||||
|
||||
{{ block documentBody() }}
|
||||
<div class="container-fluid p-4">
|
||||
<div class="row mb-4">
|
||||
<div class="col">
|
||||
<h1 class="mb-3">OpenRPC Manager</h1>
|
||||
<p class="lead">This page provides access to all available OpenRPC servers and their APIs.</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row mb-4">
|
||||
<div class="col">
|
||||
<div class="card">
|
||||
<div class="card-header">
|
||||
<h5 class="mb-0">Available OpenRPC Servers</h5>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<table class="table table-striped">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Server Name</th>
|
||||
<th>Description</th>
|
||||
<th>Status</th>
|
||||
<th>Actions</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>Virtual File System (VFS)</td>
|
||||
<td>Provides file system operations including upload, download, and metadata management</td>
|
||||
<td>
|
||||
<span class="badge bg-success">Running</span>
|
||||
</td>
|
||||
<td>
|
||||
<a href="/admin/openrpc/vfs" class="btn btn-sm btn-primary">View API</a>
|
||||
<a href="/api/vfs/openrpc" target="_blank" class="btn btn-sm btn-secondary ms-2">Schema</a>
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row">
|
||||
<div class="col">
|
||||
<div class="card">
|
||||
<div class="card-header">
|
||||
<h5 class="mb-0">OpenRPC Information</h5>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<p>
|
||||
<strong>What is OpenRPC?</strong> OpenRPC is a standard for describing JSON-RPC 2.0 APIs, similar to how OpenAPI (Swagger) describes REST APIs.
|
||||
</p>
|
||||
|
||||
<p>
<strong>Benefits:</strong>
</p>
<ul>
<li>Standardized API documentation</li>
<li>Automatic client and server code generation</li>
<li>Consistent interface across different programming languages</li>
<li>Self-documenting APIs with built-in schema validation</li>
</ul>
|
||||
|
||||
<p>
|
||||
<strong>Learn more:</strong>
|
||||
<a href="https://open-rpc.org/" target="_blank">open-rpc.org</a>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{{ end }}
|
||||
|
||||
{{ block scripts() }}
|
||||
<script>
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
// Add any JavaScript functionality here
|
||||
console.log('OpenRPC Manager page loaded');
|
||||
});
|
||||
</script>
|
||||
{{ end }}
|
235
_pkg2_dont_use/heroagent/web/templates/admin/openrpc/vfs.jet
Normal file
235
_pkg2_dont_use/heroagent/web/templates/admin/openrpc/vfs.jet
Normal file
@@ -0,0 +1,235 @@
|
||||
{{ extends "../layout" }}
|
||||
|
||||
{{ block documentBody() }}
|
||||
<div class="container-fluid p-4">
|
||||
<div class="row mb-4">
|
||||
<div class="col">
|
||||
<h1 class="mb-3">Virtual File System API</h1>
|
||||
<p class="lead">This page provides access to the VFS OpenRPC API documentation, methods, and logs.</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Tabs navigation -->
|
||||
<div class="row mb-4">
|
||||
<div class="col">
|
||||
<ul class="nav nav-tabs" id="vfsTabs">
|
||||
<li class="nav-item">
|
||||
<a class="nav-link active" href="#overview" up-target=".tab-content">Overview</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="/admin/openrpc/vfs/logs" up-target="#logs">Logs</a>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Tab content -->
|
||||
<div class="tab-content">
|
||||
<!-- Overview tab -->
|
||||
<div id="overview">
|
||||
{{ include "./vfs_overview" }}
|
||||
</div>
|
||||
|
||||
<!-- Logs tab (will be loaded via Unpoly) -->
|
||||
<div id="logs">
|
||||
<div class="text-center py-5">
<div class="spinner-border" role="status">
<span class="visually-hidden">Loading...</span>
</div>
<div class="mt-3">Loading logs...</div>
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{{ end }}
|
||||
|
||||
{{ block scripts() }}
|
||||
<script>
|
||||
/* Handle tab switching */
|
||||
up.compiler('#vfsTabs a', function(element) {
|
||||
element.addEventListener('click', function(e) {
|
||||
/* Remove active class from all tabs */
|
||||
document.querySelectorAll('#vfsTabs a').forEach(function(tab) {
|
||||
tab.classList.remove('active');
|
||||
});
|
||||
|
||||
/* Add active class to clicked tab */
|
||||
element.classList.add('active');
|
||||
|
||||
/* If overview tab is clicked, show overview and hide logs */
|
||||
if (element.getAttribute('href') === '#overview') {
|
||||
e.preventDefault(); /* Prevent default anchor behavior */
|
||||
document.getElementById('overview').style.display = 'block';
|
||||
document.getElementById('logs').style.display = 'none';
|
||||
} else {
|
||||
/* For logs tab, hide overview (logs will be loaded via Unpoly) */
|
||||
document.getElementById('overview').style.display = 'none';
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
const methodSelect = document.getElementById('method-select');
|
||||
const methodParams = document.getElementById('method-params');
|
||||
const paramFields = document.getElementById('param-fields');
|
||||
const executeBtn = document.getElementById('execute-btn');
|
||||
const resultContainer = document.getElementById('result-container');
|
||||
const resultOutput = document.getElementById('result-output');
|
||||
|
||||
/* Method parameter definitions */
|
||||
const methodDefinitions = {
|
||||
'UploadFile': [
|
||||
{ name: 'vfspath', type: 'string', description: 'Path in the virtual file system' },
|
||||
{ name: 'dedupepath', type: 'string', description: 'Path for deduplication' },
|
||||
{ name: 'filepath', type: 'string', description: 'Local file path to upload' }
|
||||
],
|
||||
'UploadDir': [
|
||||
{ name: 'vfspath', type: 'string', description: 'Path in the virtual file system' },
|
||||
{ name: 'dedupepath', type: 'string', description: 'Path for deduplication' },
|
||||
{ name: 'dirpath', type: 'string', description: 'Local directory path to upload' }
|
||||
],
|
||||
'DownloadFile': [
|
||||
{ name: 'vfspath', type: 'string', description: 'Path in the virtual file system' },
|
||||
{ name: 'dedupepath', type: 'string', description: 'Path for deduplication' },
|
||||
{ name: 'destpath', type: 'string', description: 'Local destination path' }
|
||||
],
|
||||
'ExportMeta': [
|
||||
{ name: 'vfspath', type: 'string', description: 'Path in the virtual file system' },
|
||||
{ name: 'destpath', type: 'string', description: 'Local destination path for metadata' }
|
||||
],
|
||||
'ImportMeta': [
|
||||
{ name: 'vfspath', type: 'string', description: 'Path in the virtual file system' },
|
||||
{ name: 'sourcepath', type: 'string', description: 'Local source path for metadata' }
|
||||
],
|
||||
'ExportDedupe': [
|
||||
{ name: 'vfspath', type: 'string', description: 'Path in the virtual file system' },
|
||||
{ name: 'dedupepath', type: 'string', description: 'Path for deduplication' },
|
||||
{ name: 'destpath', type: 'string', description: 'Local destination path for dedupe info' }
|
||||
],
|
||||
'ImportDedupe': [
|
||||
{ name: 'vfspath', type: 'string', description: 'Path in the virtual file system' },
|
||||
{ name: 'dedupepath', type: 'string', description: 'Path for deduplication' },
|
||||
{ name: 'sourcepath', type: 'string', description: 'Local source path for dedupe info' }
|
||||
],
|
||||
'Send': [
|
||||
{ name: 'dedupepath', type: 'string', description: 'Path for deduplication' },
|
||||
{ name: 'pubkeydest', type: 'string', description: 'Public key of destination' },
|
||||
{ name: 'hashlist', type: 'array', description: 'List of hashes to send' },
|
||||
{ name: 'secret', type: 'string', description: 'Secret for authentication' }
|
||||
],
|
||||
'SendExist': [
|
||||
{ name: 'dedupepath', type: 'string', description: 'Path for deduplication' },
|
||||
{ name: 'pubkeydest', type: 'string', description: 'Public key of destination' },
|
||||
{ name: 'hashlist', type: 'array', description: 'List of hashes to check' },
|
||||
{ name: 'secret', type: 'string', description: 'Secret for authentication' }
|
||||
],
|
||||
'ExposeWebDAV': [
|
||||
{ name: 'vfspath', type: 'string', description: 'Path in the virtual file system' },
|
||||
{ name: 'port', type: 'number', description: 'Port to expose on' },
|
||||
{ name: 'username', type: 'string', description: 'WebDAV username' },
|
||||
{ name: 'password', type: 'string', description: 'WebDAV password' }
|
||||
],
|
||||
'Expose9P': [
|
||||
{ name: 'vfspath', type: 'string', description: 'Path in the virtual file system' },
|
||||
{ name: 'port', type: 'number', description: 'Port to expose on' },
|
||||
{ name: 'readonly', type: 'boolean', description: 'Whether to expose as read-only' }
|
||||
]
|
||||
};
|
||||
|
||||
/* When a method is selected, show the parameter form */
|
||||
methodSelect.addEventListener('change', function() {
|
||||
const selectedMethod = this.value;
|
||||
|
||||
if (!selectedMethod) {
|
||||
methodParams.classList.add('d-none');
|
||||
return;
|
||||
}
|
||||
|
||||
/* Clear previous parameters */
|
||||
paramFields.innerHTML = '';
|
||||
|
||||
/* Add parameter fields for the selected method */
|
||||
const params = methodDefinitions[selectedMethod] || [];
|
||||
params.forEach(param => {
|
||||
const formGroup = document.createElement('div');
|
||||
formGroup.className = 'form-group mb-2';
|
||||
|
||||
const label = document.createElement('label');
|
||||
label.textContent = `${param.name} (${param.type}):`;
|
||||
label.setAttribute('for', `param-${param.name}`);
|
||||
|
||||
const input = document.createElement('input');
|
||||
input.className = 'form-control';
|
||||
input.id = `param-${param.name}`;
|
||||
input.name = param.name;
|
||||
input.setAttribute('data-type', param.type);
|
||||
|
||||
if (param.type === 'boolean') {
|
||||
input.type = 'checkbox';
|
||||
input.className = 'form-check-input ms-2';
|
||||
} else {
|
||||
input.type = 'text';
|
||||
}
|
||||
|
||||
const small = document.createElement('small');
|
||||
small.className = 'form-text text-muted';
|
||||
small.textContent = param.description;
|
||||
|
||||
formGroup.appendChild(label);
|
||||
formGroup.appendChild(input);
|
||||
formGroup.appendChild(small);
|
||||
paramFields.appendChild(formGroup);
|
||||
});
|
||||
|
||||
methodParams.classList.remove('d-none');
|
||||
});
|
||||
|
||||
/* Execute button handler */
|
||||
executeBtn.addEventListener('click', function() {
|
||||
const selectedMethod = methodSelect.value;
|
||||
if (!selectedMethod) return;
|
||||
|
||||
const params = {};
|
||||
const paramDefs = methodDefinitions[selectedMethod] || [];
|
||||
|
||||
/* Collect parameter values */
|
||||
paramDefs.forEach(param => {
|
||||
const input = document.getElementById(`param-${param.name}`);
|
||||
if (!input) return;
|
||||
|
||||
let value = input.value;
|
||||
if (param.type === 'boolean') {
|
||||
value = input.checked;
|
||||
} else if (param.type === 'number') {
|
||||
value = parseFloat(value);
|
||||
} else if (param.type === 'array' && value) {
|
||||
try {
|
||||
value = JSON.parse(value);
|
||||
} catch (e) {
|
||||
value = value.split(',').map(item => item.trim());
|
||||
}
|
||||
}
|
||||
|
||||
params[param.name] = value;
|
||||
});
|
||||
|
||||
/* Call the API */
|
||||
fetch(`/api/vfs/${selectedMethod.toLowerCase()}`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify(params)
|
||||
})
|
||||
.then(response => response.json())
|
||||
.then(data => {
|
||||
resultOutput.textContent = JSON.stringify(data, null, 2);
|
||||
resultContainer.classList.remove('d-none');
|
||||
})
|
||||
.catch(error => {
|
||||
resultOutput.textContent = `Error: ${error.message}`;
|
||||
resultContainer.classList.remove('d-none');
|
||||
});
|
||||
});
|
||||
});
|
||||
</script>
|
||||
{{ end }}
|
@@ -0,0 +1,118 @@
|
||||
<div class="row mb-4">
|
||||
<div class="col">
|
||||
<div class="card">
|
||||
<div class="card-header">
|
||||
<h5 class="mb-0">OpenRPC Schema</h5>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<p>The OpenRPC schema describes all available methods for interacting with the Virtual File System.</p>
|
||||
<a href="/api/vfs/openrpc" target="_blank" class="btn btn-primary">View Schema</a>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row">
|
||||
<div class="col">
|
||||
<div class="card">
|
||||
<div class="card-header">
|
||||
<h5 class="mb-0">Available Methods</h5>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<table class="table table-striped">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Method</th>
|
||||
<th>Description</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>UploadFile</td>
|
||||
<td>Uploads a file to the virtual file system</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>UploadDir</td>
|
||||
<td>Uploads a directory to the virtual file system</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>DownloadFile</td>
|
||||
<td>Downloads a file from the virtual file system</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>ExportMeta</td>
|
||||
<td>Exports metadata from the virtual file system</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>ImportMeta</td>
|
||||
<td>Imports metadata to the virtual file system</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>ExportDedupe</td>
|
||||
<td>Exports dedupe information from the virtual file system</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>ImportDedupe</td>
|
||||
<td>Imports dedupe information to the virtual file system</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Send</td>
|
||||
<td>Sends files based on dedupe hashes to a destination</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>SendExist</td>
|
||||
<td>Checks which dedupe hashes exist and returns a list</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>ExposeWebDAV</td>
|
||||
<td>Exposes the virtual file system via WebDAV</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Expose9P</td>
|
||||
<td>Exposes the virtual file system via 9P protocol</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row mt-4">
|
||||
<div class="col">
|
||||
<div class="card">
|
||||
<div class="card-header">
|
||||
<h5 class="mb-0">API Testing</h5>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<p class="mb-3">You can test the VFS API methods directly from this interface.</p>
|
||||
<div class="form-group mb-3">
|
||||
<label for="method-select">Select Method:</label>
|
||||
<select id="method-select" class="form-control">
|
||||
<option value="">-- Select a method --</option>
|
||||
<option value="UploadFile">UploadFile</option>
|
||||
<option value="UploadDir">UploadDir</option>
|
||||
<option value="DownloadFile">DownloadFile</option>
|
||||
<option value="ExportMeta">ExportMeta</option>
|
||||
<option value="ImportMeta">ImportMeta</option>
|
||||
<option value="ExportDedupe">ExportDedupe</option>
|
||||
<option value="ImportDedupe">ImportDedupe</option>
|
||||
<option value="Send">Send</option>
|
||||
<option value="SendExist">SendExist</option>
|
||||
<option value="ExposeWebDAV">ExposeWebDAV</option>
|
||||
<option value="Expose9P">Expose9P</option>
|
||||
</select>
|
||||
</div>
|
||||
<div id="method-params" class="d-none">
|
||||
<h6 class="mb-3">Parameters:</h6>
|
||||
<div id="param-fields"></div>
|
||||
</div>
|
||||
<button id="execute-btn" class="btn btn-primary mt-3">Execute Method</button>
|
||||
<div id="result-container" class="mt-4 d-none">
|
||||
<h6>Result:</h6>
|
||||
<pre id="result-output" class="bg-light p-3 border rounded"></pre>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
@@ -0,0 +1,25 @@
|
||||
<!-- header -->
|
||||
<header>
|
||||
<nav class="top-nav">
|
||||
<div class="brand">
|
||||
<a href="/admin">
|
||||
<img class="brand-icon" src="/img/hero-icon.svg" alt="HeroLauncher Logo" width="24" height="24">
|
||||
<span>HeroLauncher</span>
|
||||
</a>
|
||||
</div>
|
||||
|
||||
<div class="nav-links">
|
||||
<a class="nav-link" href="/admin">Home</a>
|
||||
<a class="nav-link" href="/admin/services">Services</a>
|
||||
<a class="nav-link" href="/admin/system/info">System</a>
|
||||
</div>
|
||||
|
||||
<div class="nav-right">
|
||||
<input class="search-box" type="search" placeholder="Search...">
|
||||
<button class="menu-toggle" aria-label="Toggle menu">
|
||||
<span>Menu</span>
|
||||
</button>
|
||||
<a role="button" href="/">Back to App</a>
|
||||
</div>
|
||||
</nav>
|
||||
</header>
|
@@ -0,0 +1,7 @@
|
||||
<!-- log-panel - Log panel component -->
|
||||
<div class="log-panel">
|
||||
<h3>System Logs</h3>
|
||||
<div class="log-content"></div>
|
||||
</div>
|
||||
|
||||
<button class="log-toggle" aria-label="Toggle logs">Logs</button>
|
@@ -0,0 +1,23 @@
|
||||
<!-- sidebar -->
|
||||
<div class="sidebar-wrapper">
|
||||
<nav class="sidebar-nav">
|
||||
<div class="sidebar-section">
|
||||
<a class="sidebar-link" href="/admin">Dashboard</a>
|
||||
</div>
|
||||
<div class="sidebar-section">
|
||||
<a class="sidebar-link" href="/admin/system/info">System</a>
|
||||
</div>
|
||||
<div class="sidebar-section">
|
||||
<a class="sidebar-link" href="/admin/system/processes">Processes</a>
|
||||
</div>
|
||||
<div class="sidebar-section">
|
||||
<a class="sidebar-link" href="/admin/services">Services</a>
|
||||
</div>
|
||||
<div class="sidebar-section">
|
||||
<a class="sidebar-link" href="/jobs">Jobs</a>
|
||||
</div>
|
||||
<div class="sidebar-section">
|
||||
<a class="sidebar-link" href="/admin/system/logs">Logs</a>
|
||||
</div>
|
||||
</nav>
|
||||
</div>
|
47
_pkg2_dont_use/heroagent/web/templates/admin/services.jet
Normal file
47
_pkg2_dont_use/heroagent/web/templates/admin/services.jet
Normal file
@@ -0,0 +1,47 @@
|
||||
{{ extends "./layout" }}
|
||||
|
||||
{{ block documentBody() }}
|
||||
<div class="services-page">
|
||||
<h2 class="section-title">Services</h2>
|
||||
<p class="section-description">Manage all your running services</p>
|
||||
<div class="two-column-layout">
|
||||
<div class="card">
|
||||
<div class="card-title">Active Services</div>
|
||||
<div class="card-actions">
|
||||
<button class="button refresh" onclick="refreshServices()">Refresh</button>
|
||||
</div>
|
||||
|
||||
<!-- Service list -->
|
||||
<div id="services-table">
|
||||
{{ include "./services_fragment" }}
|
||||
</div>
|
||||
</div>
|
||||
<div class="card">
|
||||
<div class="card-title">Start New Service</div>
|
||||
<div class="card-content">
|
||||
<form id="start-service-form" onsubmit="startService(event)">
|
||||
<div class="form-group">
|
||||
<label for="service-name">Service Name</label>
|
||||
<input id="service-name" type="text" name="name" required="required">
|
||||
</div>
|
||||
|
||||
<div class="form-group">
|
||||
<label for="service-command">Command</label>
|
||||
<input id="service-command" type="text" name="command" required="required">
|
||||
</div>
|
||||
|
||||
<div class="form-actions">
|
||||
<button class="button" type="submit">Start Service</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
|
||||
<div id="start-result" class="alert" style="display: none"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{{ end }}
|
||||
|
||||
{{ block scripts() }}
|
||||
<script src="/js/services.js"></script>
|
||||
{{ end }}
|
@@ -0,0 +1,47 @@
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Name</th>
|
||||
<th>Status</th>
|
||||
<th>PID</th>
|
||||
<th>CPU</th>
|
||||
<th>Memory</th>
|
||||
<th>Uptime</th>
|
||||
<th>Actions</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{ if processes }}
|
||||
{{ range processes }}
|
||||
<tr>
|
||||
<td>{{ .Name }}</td>
|
||||
<td>
|
||||
{{ if .Status == "running" }}
|
||||
<span class="badge success">Running</span>
|
||||
{{ else if .Status == "stopped" }}
|
||||
<span class="badge danger">Stopped</span>
|
||||
{{ else }}
|
||||
<span class="badge warning">{{ .Status }}</span>
|
||||
{{ end }}
|
||||
</td>
|
||||
<td>{{ .ID }}</td>
|
||||
<td>{{ if .Status == "running" }}{{ .CPU }}{{ else }}-{{ end }}</td>
|
||||
<td>{{ if .Status == "running" }}{{ .Memory }}{{ else }}-{{ end }}</td>
|
||||
<td>{{ if .Status == "running" }}{{ .Uptime }}{{ else }}-{{ end }}</td>
|
||||
<td>
|
||||
<div class="button-group">
|
||||
<button class="button" onclick="restartProcess('{{ .Name }}')">Restart</button>
|
||||
<button class="button secondary" onclick="stopProcess('{{ .Name }}')">Stop</button>
|
||||
<button class="button danger" style="background-color: #e53935 !important; color: #fff !important;" onclick="deleteProcess('{{ .Name }}')">Delete</button>
|
||||
<button class="button info" onclick="showProcessLogs('{{ .Name }}')">Logs</button>
|
||||
</div>
|
||||
</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
{{ else }}
|
||||
<tr>
|
||||
<td colspan="7">No services found</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
</tbody>
|
||||
</table>
|
@@ -0,0 +1,19 @@
|
||||
<!-- Hardware stats fragment for polling updates -->
|
||||
<tbody>
|
||||
<tr>
|
||||
<th scope="row">CPU</th>
|
||||
<td>{{ cpuInfo }} ({{ cpuUsage }})</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Memory</th>
|
||||
<td>{{ memoryInfo }} ({{ memUsage }})</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Disk</th>
|
||||
<td>{{ diskInfo }} ({{ diskUsage }})</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Network</th>
|
||||
<td style="white-space: pre-line;">{{ networkInfo }}</td>
|
||||
</tr>
|
||||
</tbody>
|
79
_pkg2_dont_use/heroagent/web/templates/admin/system/info.jet
Normal file
79
_pkg2_dont_use/heroagent/web/templates/admin/system/info.jet
Normal file
@@ -0,0 +1,79 @@
|
||||
{{ extends "../layout" }}
|
||||
|
||||
{{ block documentBody() }}
|
||||
<article class="system-info">
|
||||
<header>
|
||||
<h2 class="title">System Information</h2>
|
||||
<p class="description text-muted">Overview of system resources and configuration</p>
|
||||
</header>
|
||||
|
||||
<div class="grid" style="display: grid; grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); gap: 1rem;">
|
||||
<div>
|
||||
<article class="hardware-info">
|
||||
<header>
|
||||
<h3 id="hardware-title">Hardware</h3>
|
||||
</header>
|
||||
|
||||
<table class="table table-striped" up:poll="/admin/system/hardware-stats" up:target=".hardware-stats" up:poll-interval="1000">
|
||||
<tbody>
|
||||
<tr>
|
||||
<th scope="row">CPU</th>
|
||||
<td>{{ cpuInfo }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Memory</th>
|
||||
<td>{{ memoryInfo }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Disk</th>
|
||||
<td>{{ diskInfo }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Network</th>
|
||||
<td style="white-space: pre-line;">{{ networkInfo }}</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
{{ include "partials/network_chart" }}
|
||||
</article>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<article class="software-info">
|
||||
<header>
|
||||
<h3 id="software-title">Software</h3>
|
||||
</header>
|
||||
|
||||
<table class="table table-bordered" data:type="software-info">
|
||||
<tbody>
|
||||
<tr>
|
||||
<th scope="row">OS</th>
|
||||
<td>{{ osInfo }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">HeroLauncher</th>
|
||||
<td>HeroLauncher</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Uptime</th>
|
||||
<td>{{ uptimeInfo }}</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
{{ include "partials/__cpu_chart" }}
|
||||
{{ include "partials/__memory_chart" }}
|
||||
</article>
|
||||
</div>
|
||||
</div>
|
||||
</article>
|
||||
{{ end }}
|
||||
|
||||
{{ block scripts() }}
|
||||
<script src="/js/echarts/echarts.min.js"></script>
|
||||
<script src="/js/charts/cpu-chart.js"></script>
|
||||
<script src="/js/charts/memory-chart.js"></script>
|
||||
<script src="/js/charts/network-chart.js"></script>
|
||||
<script src="/js/charts/stats-fetcher.js"></script>
|
||||
{{ end }}
|
58
_pkg2_dont_use/heroagent/web/templates/admin/system/jobs.jet
Normal file
58
_pkg2_dont_use/heroagent/web/templates/admin/system/jobs.jet
Normal file
@@ -0,0 +1,58 @@
|
||||
{{ extends "../layout" }}
|
||||
|
||||
<header>
|
||||
<h2 class="title">System Jobs</h2>
|
||||
<p class="description text-muted">Overview of scheduled jobs</p>
|
||||
</header>
|
||||
<article class="jobs-info">
|
||||
<div class="grid" style="display: grid; grid-template-columns: 1fr; gap: 1rem;">
|
||||
<div>
|
||||
<article class="jobs-table">
|
||||
<header>
|
||||
<h3 id="jobs-title">Scheduled Jobs</h3>
|
||||
<p class="refresh-status">
|
||||
<button class="btn btn-sm" onclick="refreshJobs()">
|
||||
Refresh
|
||||
<span class="loading-indicator" id="refresh-loading" style="display: none;"> Loading...</span>
|
||||
</button>
|
||||
</p>
|
||||
</header>
|
||||
|
||||
<div class="jobs-table-content" up-poll="/admin/system/jobs-data" up-hungry="true" up-interval="10000" style="display: block; width: 100%;" id="jobs-content">
|
||||
{{ if isset(., "error") }}
|
||||
<div class="alert alert-danger">{{ .error }}</div>
|
||||
{{ end }}
|
||||
|
||||
<table class="table table-striped" id="jobs-stats">
|
||||
<thead>
|
||||
<tr>
|
||||
<th scope="col">ID</th>
|
||||
<th scope="col">Name</th>
|
||||
<th scope="col">Status</th>
|
||||
<th scope="col">Next Run</th>
|
||||
<th scope="col">Last Run</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{ if isset(., "jobs") && len(.jobs) > 0 }}
|
||||
{{ range .jobs }}
|
||||
<tr class="{{ if .is_current }}table-primary{{ end }}">
|
||||
<td>{{ .id }}</td>
|
||||
<td>{{ .name }}</td>
|
||||
<td>{{ .status }}</td>
|
||||
<td>{{ .next_run }}</td>
|
||||
<td>{{ .last_run }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
{{ else }}
|
||||
<tr>
|
||||
<td colspan="5">No job data available.</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</article>
|
||||
</div>
|
||||
</div>
|
||||
</article>
|
135
_pkg2_dont_use/heroagent/web/templates/admin/system/logs.jet
Normal file
135
_pkg2_dont_use/heroagent/web/templates/admin/system/logs.jet
Normal file
@@ -0,0 +1,135 @@
|
||||
{{ extends "../layout" }}
|
||||
|
||||
{{ block documentBody() }}
|
||||
<article>
|
||||
<header class="flex-container">
|
||||
<div>
|
||||
<h2>{{title}}</h2>
|
||||
<p>View and filter logs from different sources</p>
|
||||
</div>
|
||||
<div>
|
||||
<a href="/api/logs/export" role="button" class="outline">Export Logs</a>
|
||||
</div>
|
||||
</header>
|
||||
|
||||
<article class="filter-controls">
|
||||
<form class="log-controls" id="log-filter-form" action="/admin/system/logs" method="get" up-target="#logs-table-container" up-submit>
|
||||
<div class="grid filter-grid">
|
||||
<div class="filter-item">
|
||||
<label for="log-type">Log Type</label>
|
||||
<select id="log-type" name="log_type">
|
||||
{{range logTypes}}
|
||||
<option value="{{.}}" {{if selectedLogType == '.'}}selected{{end}}>{{if . == "all"}}All Logs{{else if . == "system"}}System Logs{{else if . == "service"}}Service Logs{{else if . == "job"}}Job Logs{{else if . == "process"}}Process Logs{{end}}</option>
|
||||
{{end}}
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<div class="filter-item">
|
||||
<label for="log-level">Log Level</label>
|
||||
<select id="log-level" name="type">
|
||||
<option value="all" {{if typeParam == "all" || typeParam == ""}}selected{{end}}>All Levels</option>
|
||||
<option value="info" {{if typeParam == "info"}}selected{{end}}>Info</option>
|
||||
<option value="error" {{if typeParam == "error"}}selected{{end}}>Error</option>
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<div class="filter-item">
|
||||
<label for="log-source">Log Source</label>
|
||||
<select id="log-source" name="category">
|
||||
<option value="" {{if categoryParam == ""}}selected{{end}}>All Sources</option>
|
||||
<option value="system" {{if categoryParam == "system"}}selected{{end}}>System</option>
|
||||
<option value="redis" {{if categoryParam == "redis"}}selected{{end}}>Redis</option>
|
||||
<option value="executor" {{if categoryParam == "executor"}}selected{{end}}>Executor</option>
|
||||
<option value="package" {{if categoryParam == "package"}}selected{{end}}>Package Manager</option>
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<div class="filter-item">
|
||||
<label for="log-from-date">From Date</label>
|
||||
<input type="datetime-local" id="log-from-date" name="from">
|
||||
</div>
|
||||
|
||||
<div class="filter-item">
|
||||
<label for="log-to-date">To Date</label>
|
||||
<input type="datetime-local" id="log-to-date" name="to">
|
||||
</div>
|
||||
|
||||
<div class="filter-button">
|
||||
<button type="submit" class="filter-apply" up-target="#logs-table-container">Apply Filters</button>
|
||||
</div>
|
||||
</div>
|
||||
</form>
|
||||
</article>
|
||||
|
||||
<article class="log-container">
|
||||
<header>
|
||||
<h3>Log Output</h3>
|
||||
</header>
|
||||
|
||||
<div id="logs-table-container">
|
||||
<!-- Log content is loaded directly -->
|
||||
{{ if isset(., "error") }}
|
||||
<div class="alert alert-danger">{{ .error }}</div>
|
||||
{{ end }}
|
||||
|
||||
<!-- Include logs table -->
|
||||
<div class="log-table">
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Timestamp</th>
|
||||
<th>Level</th>
|
||||
<th>Source</th>
|
||||
<th>Message</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{if isset(., "logs")}}
|
||||
{{range logs}}
|
||||
<tr>
|
||||
<td>{{.timestamp}}</td>
|
||||
<td class="log-{{.type | lower}}">{{.type}}</td>
|
||||
<td>{{.category}}</td>
|
||||
<td>{{.message}}</td>
|
||||
</tr>
|
||||
{{else}}
|
||||
<tr>
|
||||
<td colspan="4" class="text-center">No logs found matching your criteria</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
{{else}}
|
||||
<tr>
|
||||
<td colspan="4" class="text-center">Loading logs...</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
<div class="pagination">
|
||||
<div class="pagination-info">
|
||||
{{if isset(., "logs")}}
|
||||
{{if len(logs) > 0}}
|
||||
<span>Showing {{showing}} of {{total}} logs</span>
|
||||
{{else}}
|
||||
<span>No logs found</span>
|
||||
{{end}}
|
||||
{{else}}
|
||||
<span>Loading logs...</span>
|
||||
{{end}}
|
||||
</div>
|
||||
<div class="pagination-controls">
|
||||
{{if isset(., "page") && isset(., "totalPages")}}
|
||||
{{if page > 1}}
|
||||
<a href="/admin/system/logs?page={{page - 1}}{{if isset(., "categoryParam")}}&category={{categoryParam}}{{end}}{{if isset(., "typeParam")}}&type={{typeParam}}{{end}}{{if isset(., "fromParam")}}&from={{fromParam}}{{end}}{{if isset(., "toParam")}}&to={{toParam}}{{end}}" role="button" class="outline secondary" up-target="#logs-table-container">← Previous</a>
|
||||
{{end}}
|
||||
{{if page < totalPages}}
|
||||
<a href="/admin/system/logs?page={{page + 1}}{{if isset(., "categoryParam")}}&category={{categoryParam}}{{end}}{{if isset(., "typeParam")}}&type={{typeParam}}{{end}}{{if isset(., "fromParam")}}&from={{fromParam}}{{end}}{{if isset(., "toParam")}}&to={{toParam}}{{end}}" role="button" class="outline secondary" up-target="#logs-table-container">Next →</a>
|
||||
{{end}}
|
||||
{{end}}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</article>
|
||||
</article>
|
||||
{{ end }}
|
@@ -0,0 +1,49 @@
|
||||
<!-- This template contains just the logs table content for Unpoly updates -->
|
||||
{{ if isset(., "error") }}
|
||||
<div class="alert alert-danger">{{ .error }}</div>
|
||||
{{ end }}
|
||||
|
||||
<div class="log-table">
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Timestamp</th>
|
||||
<th>Level</th>
|
||||
<th>Source</th>
|
||||
<th>Message</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{range logs}}
|
||||
<tr>
|
||||
<td>{{.timestamp}}</td>
|
||||
<td class="log-{{.type | lower}}">{{.type}}</td>
|
||||
<td>{{.category}}</td>
|
||||
<td>{{.message}}</td>
|
||||
</tr>
|
||||
{{else}}
|
||||
<tr>
|
||||
<td colspan="4" class="text-center">No logs found matching your criteria</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
<div class="pagination">
|
||||
<div class="pagination-info">
|
||||
{{if len(logs) > 0}}
|
||||
<span>Showing {{len(logs)}} of {{total}} logs</span>
|
||||
{{else}}
|
||||
<span>No logs found</span>
|
||||
{{end}}
|
||||
</div>
|
||||
<div class="pagination-controls">
|
||||
{{if page > 1}}
|
||||
<a href="/admin/system/logs?page={{page - 1}}{{if isset(., "selectedLogType")}}&log_type={{selectedLogType}}{{end}}{{if isset(., "categoryParam")}}&category={{categoryParam}}{{end}}{{if isset(., "typeParam")}}&type={{typeParam}}{{end}}{{if isset(., "fromParam")}}&from={{fromParam}}{{end}}{{if isset(., "toParam")}}&to={{toParam}}{{end}}" role="button" class="outline secondary" up-target="#logs-table-container">← Previous</a>
|
||||
{{end}}
|
||||
{{if page < totalPages}}
|
||||
<a href="/admin/system/logs?page={{page + 1}}{{if isset(., "selectedLogType")}}&log_type={{selectedLogType}}{{end}}{{if isset(., "categoryParam")}}&category={{categoryParam}}{{end}}{{if isset(., "typeParam")}}&type={{typeParam}}{{end}}{{if isset(., "fromParam")}}&from={{fromParam}}{{end}}{{if isset(., "toParam")}}&to={{toParam}}{{end}}" role="button" class="outline secondary" up-target="#logs-table-container">Next →</a>
|
||||
{{end}}
|
||||
</div>
|
||||
</div>
|
@@ -0,0 +1,6 @@
|
||||
{{ extends "admin/layout" }}
|
||||
|
||||
{{ block documentBody() }}
|
||||
<h1>Test Logs Page</h1>
|
||||
<p>This is a simple test template</p>
|
||||
{{ end }}
|
@@ -0,0 +1,2 @@
|
||||
<h4 style="margin-bottom: 10px;">Process CPU Usage</h4>
|
||||
<div id="cpu-chart" style="width: 100%; height: 300px; margin-bottom: 30px;"></div>
|
@@ -0,0 +1,2 @@
|
||||
<h4 style="margin-bottom: 10px;">Process Memory Usage</h4>
|
||||
<div id="memory-chart" style="width: 100%; height: 300px;"></div>
|
@@ -0,0 +1 @@
|
||||
<!-- Stats fetcher removed - now loaded from external JS file -->
|
@@ -0,0 +1,2 @@
|
||||
<h4 style="margin-bottom: 10px;">Network Traffic</h4>
|
||||
<div id="network-chart" style="width: 100%; height: 300px; margin-top: 10px;"></div>
|
@@ -0,0 +1,77 @@
|
||||
{{ extends "../layout" }}
|
||||
|
||||
{{ block documentBody() }}
|
||||
<article class="processes-info">
|
||||
<header>
|
||||
<h2 class="title">System Processes</h2>
|
||||
<p class="description text-muted">Overview of running processes with CPU and memory usage</p>
|
||||
</header>
|
||||
|
||||
<div class="grid" style="display: grid; grid-template-columns: 1fr; gap: 1rem;">
|
||||
<div>
|
||||
<article class="processes-table">
|
||||
<header>
|
||||
<h3 id="processes-title">Running Processes</h3>
|
||||
<p class="refresh-status">
|
||||
<button class="btn btn-sm" onclick="refreshProcesses()">
|
||||
Refresh
|
||||
<span class="loading-indicator" id="refresh-loading" style="display: none;"> Loading...</span>
|
||||
</button>
|
||||
</p>
|
||||
</header>
|
||||
|
||||
<div class="processes-table-content" up-poll="/admin/system/processes-data" up-hungry="true" up-interval="10000" style="display: block; width: 100%;" id="processes-content">
|
||||
{{ if isset(., "error") }}
|
||||
<div class="alert alert-danger">{{ .error }}</div>
|
||||
{{ end }}
|
||||
|
||||
<table class="table table-striped" id="processes-stats">
|
||||
<thead>
|
||||
<tr>
|
||||
<th scope="col">PID</th>
|
||||
<th scope="col">Name</th>
|
||||
<th scope="col">Status</th>
|
||||
<th scope="col">CPU (%)</th>
|
||||
<th scope="col">Memory (MB)</th>
|
||||
<th scope="col">Created</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{ if isset(., "processes") && len(.processes) > 0 }}
|
||||
{{ range .processes }}
|
||||
<tr class="{{ if .is_current }}table-primary{{ end }}">
|
||||
<td>{{ .pid }}</td>
|
||||
<td>{{ .name }}</td>
|
||||
<td>{{ .status }}</td>
|
||||
<td>{{ .cpu_percent_str }}</td>
|
||||
<td>{{ .memory_mb_str }}</td>
|
||||
<td>{{ .create_time_str }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
{{ else }}
|
||||
<tr>
|
||||
<td colspan="6">No process data available.</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</article>
|
||||
</div>
|
||||
</div>
|
||||
</article>
|
||||
|
||||
<script>
|
||||
// Ensure processes data is loaded on page load
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
// Check if the processes content is empty or shows 'No process data available'
|
||||
const processesContent = document.getElementById('processes-content');
|
||||
const tableBody = processesContent ? processesContent.querySelector('tbody') : null;
|
||||
|
||||
if (tableBody && (tableBody.innerText.includes('No process data available') || tableBody.children.length <= 1)) {
|
||||
console.log('Triggering initial process data load');
|
||||
refreshProcesses();
|
||||
}
|
||||
});
|
||||
</script>
|
||||
{{ end }}
|
@@ -0,0 +1,48 @@
|
||||
<!-- This template contains just the process table content for AJAX updates -->
|
||||
<div class="processes-table-content">
|
||||
{{ if isset(., "error") }}
|
||||
<div class="alert alert-danger">{{ .error }}</div>
|
||||
{{ end }}
|
||||
|
||||
<!-- Debug info -->
|
||||
<div class="alert alert-info">
|
||||
{{ if isset(., "debug") }}
|
||||
Debug: {{ debug }}
|
||||
{{ end }}
|
||||
|
||||
<!-- Direct debug output to help troubleshoot -->
|
||||
Has processes: {{ hasProcesses ? "Yes" : "No" }}
|
||||
Process count: {{ processCount }}
|
||||
</div>
|
||||
|
||||
<table class="table table-striped" id="processes-stats">
|
||||
<thead>
|
||||
<tr>
|
||||
<th scope="col">PID</th>
|
||||
<th scope="col">Name</th>
|
||||
<th scope="col">Status</th>
|
||||
<th scope="col">CPU (%)</th>
|
||||
<th scope="col">Memory (MB)</th>
|
||||
<th scope="col">Created</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{ if hasProcesses }}
|
||||
{{ range processStats }}
|
||||
<tr{{ if .is_current == true }} class="table-primary"{{ end }}>
|
||||
<td>{{ .pid }}</td>
|
||||
<td>{{ .name }}</td>
|
||||
<td>{{ .status }}</td>
|
||||
<td>{{ .cpu_percent_str }}</td>
|
||||
<td>{{ .memory_mb_str }}</td>
|
||||
<td>{{ .create_time_str }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
{{ else }}
|
||||
<tr>
|
||||
<td colspan="6">No process data available.</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
@@ -0,0 +1,36 @@
|
||||
<!-- This template contains just the process table content for AJAX updates -->
|
||||
{{ if .error }}
|
||||
<div class="alert alert-danger">{{ .error }}</div>
|
||||
{{ end }}
|
||||
|
||||
<!-- Process data table - regenerated on each refresh -->
|
||||
<table class="table table-striped" id="processes-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th scope='col'>PID</th>
|
||||
<th scope='col'>Name</th>
|
||||
<th scope='col'>Status</th>
|
||||
<th scope='col'>CPU (%)</th>
|
||||
<th scope='col'>Memory (MB)</th>
|
||||
<th scope='col'>Created</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{ if .processes }}
|
||||
{{ range .processes }}
|
||||
<tr{{ if .is_current }} class="table-primary"{{ end }}>
|
||||
<td>{{ .pid }}</td>
|
||||
<td>{{ .name }}</td>
|
||||
<td>{{ .status }}</td>
|
||||
<td>{{ .cpu_percent | printf("%.1f%%") }}</td>
|
||||
<td>{{ .memory_mb | printf("%.1f MB") }}</td>
|
||||
<td>{{ .create_time_str }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
{{ else }}
|
||||
<tr>
|
||||
<td colspan="6">No process data available.</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
</tbody>
|
||||
</table>
|
@@ -0,0 +1,40 @@
|
||||
{{ if isset(., "error") }}
|
||||
<div class="alert alert-danger">{{ .error }}</div>
|
||||
{{ end }}
|
||||
|
||||
<table class="table table-striped" id="processes-stats">
|
||||
<thead>
|
||||
<tr>
|
||||
<th scope="col">PID</th>
|
||||
<th scope="col">Name</th>
|
||||
<th scope="col">Status</th>
|
||||
<th scope="col">CPU (%)</th>
|
||||
<th scope="col">Memory (MB)</th>
|
||||
<th scope="col">Created</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{ if isset(., "processes") }}
|
||||
{{ if .processes }}
|
||||
{{ range .processes }}
|
||||
<tr class="{{ if .is_current }}table-primary{{ end }}">
|
||||
<td>{{.pid}}</td>
|
||||
<td>{{.name}}</td>
|
||||
<td>{{.status}}</td>
|
||||
<td>{{ printf("%.1f%%", .cpu_percent) }}</td>
|
||||
<td>{{ printf("%.1f MB", .memory_mb) }}</td>
|
||||
<td>{{.create_time_str}}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
{{ else }}
|
||||
<tr>
|
||||
<td colspan="6">No process data available.</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
{{ else }}
|
||||
<tr>
|
||||
<td colspan="6">Loading process data...</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
</tbody>
|
||||
</table>
|
@@ -0,0 +1,77 @@
|
||||
{{ extends "../../layout" }}
|
||||
|
||||
{{ block documentBody() }}
|
||||
<article>
|
||||
<header>
|
||||
<h2>System Settings</h2>
|
||||
<p>Configure system parameters and preferences</p>
|
||||
</header>
|
||||
<form>
|
||||
<div class="grid">
|
||||
<div>
|
||||
<article>
|
||||
<header>
|
||||
<h3>Server Settings</h3>
|
||||
</header>
|
||||
<label for="server-port">Server Port</label>
|
||||
<input id="server-port" type="number" value="9001">
|
||||
|
||||
<label for="log-level">Default Log Level</label>
|
||||
<select id="log-level">
|
||||
<option value="info">Info</option>
|
||||
<option value="warning">Warning</option>
|
||||
<option value="error">Error</option>
|
||||
<option value="debug">Debug</option>
|
||||
</select>
|
||||
|
||||
<label for="max-connections">Max Connections</label>
|
||||
<input id="max-connections" type="number" value="100">
|
||||
</article>
|
||||
</div>
|
||||
<div>
|
||||
<article>
|
||||
<header>
|
||||
<h3>Security Settings</h3>
|
||||
</header>
|
||||
<label for="enable-auth">Enable Authentication</label>
|
||||
<input id="enable-auth" type="checkbox" checked>
|
||||
|
||||
<label for="session-timeout">Session Timeout (minutes)</label>
|
||||
<input id="session-timeout" type="number" value="30">
|
||||
|
||||
<label for="allowed-origins">Allowed Origins (CORS)</label>
|
||||
<input id="allowed-origins" type="text" value="*">
|
||||
</article>
|
||||
</div>
|
||||
</div>
|
||||
<div class="grid">
|
||||
<div>
|
||||
<article>
|
||||
<header>
|
||||
<h3>Redis Settings</h3>
|
||||
</header>
|
||||
<label for="redis-port">Redis Port</label>
|
||||
<input id="redis-port" type="number" value="6378">
|
||||
|
||||
<label for="redis-max-memory">Max Memory (MB)</label>
|
||||
<input id="redis-max-memory" type="number" value="512">
|
||||
</article>
|
||||
</div>
|
||||
<div>
|
||||
<article>
|
||||
<header>
|
||||
<h3>Executor Settings</h3>
|
||||
</header>
|
||||
<label for="executor-timeout">Command Timeout (seconds)</label>
|
||||
<input id="executor-timeout" type="number" value="60">
|
||||
|
||||
<label for="executor-max-processes">Max Concurrent Processes</label>
|
||||
<input id="executor-max-processes" type="number" value="10">
|
||||
</article>
|
||||
</div>
|
||||
</div>
|
||||
<button type="submit">Save Settings</button>
|
||||
<button class="secondary" type="reset">Reset</button>
|
||||
</form>
|
||||
</article>
|
||||
{{ end }}
|
153
_pkg2_dont_use/heroagent/web/templates/admin/vfs_logs.html
Normal file
153
_pkg2_dont_use/heroagent/web/templates/admin/vfs_logs.html
Normal file
@@ -0,0 +1,153 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>{{.title}} - HeroLauncher</title>
|
||||
<link rel="stylesheet" href="/static/css/bootstrap.min.css">
|
||||
<link rel="stylesheet" href="/static/css/custom.css">
|
||||
<script src="/static/js/jquery.min.js"></script>
|
||||
<script src="/static/js/bootstrap.bundle.min.js"></script>
|
||||
<script src="/static/js/unpoly.min.js"></script>
|
||||
</head>
|
||||
<body>
|
||||
<div class="container-fluid p-4">
|
||||
<div class="row mb-4">
|
||||
<div class="col">
|
||||
<h2>{{.managerName}} Logs</h2>
|
||||
<p>View and filter logs from the {{.managerName}} service.</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row mb-4">
|
||||
<div class="col">
|
||||
<div class="card">
|
||||
<div class="card-header">
|
||||
<h5 class="mb-0">Filter Logs</h5>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<form id="filter-form">
|
||||
<input type="hidden" id="manager" name="manager" value="{{.managerName}}">
|
||||
<input type="hidden" id="endpoint" name="endpoint" value="{{.managerEndpoint}}">
|
||||
|
||||
<div class="row">
|
||||
<div class="col-md-3">
|
||||
<label for="method-filter">Filter by Method:</label>
|
||||
<select class="form-control" id="method-filter">
|
||||
<option value="">All Methods</option>
|
||||
{{range .methods}}
|
||||
<option value="{{.}}">{{index $.methodDisplayNames .}}</option>
|
||||
{{end}}
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<div class="col-md-3">
|
||||
<label for="status-filter">Filter by Status:</label>
|
||||
<select class="form-control" id="status-filter">
|
||||
<option value="">All Statuses</option>
|
||||
<option value="success">Success</option>
|
||||
<option value="error">Error</option>
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<div class="col-md-3">
|
||||
<label for="date-filter">Filter by Date:</label>
|
||||
<input type="date" class="form-control" id="date-filter">
|
||||
</div>
|
||||
|
||||
<div class="col-md-3">
|
||||
<label for="limit-filter">Limit Results:</label>
|
||||
<select class="form-control" id="limit-filter">
|
||||
<option value="50">50</option>
|
||||
<option value="100">100</option>
|
||||
<option value="200">200</option>
|
||||
<option value="500">500</option>
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row mt-3">
|
||||
<div class="col">
|
||||
<button type="button" id="apply-filters" class="btn btn-primary">Apply Filters</button>
|
||||
<button type="button" id="reset-filters" class="btn btn-secondary">Reset Filters</button>
|
||||
</div>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="row">
|
||||
<div class="col">
|
||||
<div class="card">
|
||||
<div class="card-header">
|
||||
<h5 class="mb-0">Logs</h5>
|
||||
</div>
|
||||
<div class="card-body">
|
||||
<div class="table-responsive">
|
||||
<table class="table table-striped">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Timestamp</th>
|
||||
<th>Method</th>
|
||||
<th>Status</th>
|
||||
<th>Duration</th>
|
||||
<th>Details</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="logs-table-body">
|
||||
<!-- Logs will be loaded here -->
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
$(document).ready(function() {
|
||||
// Apply filters when the button is clicked
|
||||
$('#apply-filters').click(function() {
|
||||
const queryParams = new URLSearchParams();
|
||||
|
||||
// Get filter values
|
||||
const methodFilter = $('#method-filter').val();
|
||||
const statusFilter = $('#status-filter').val();
|
||||
const dateFilter = $('#date-filter').val();
|
||||
const limitFilter = $('#limit-filter').val();
|
||||
|
||||
// Add filters to query parameters if they are set
|
||||
if (methodFilter) queryParams.append('method', methodFilter);
|
||||
if (statusFilter) queryParams.append('status', statusFilter);
|
||||
if (dateFilter) queryParams.append('date', dateFilter);
|
||||
if (limitFilter) queryParams.append('limit', limitFilter);
|
||||
|
||||
// Add the manager and endpoint parameters to preserve them when reloading
|
||||
queryParams.append('manager', document.getElementById('manager').value);
|
||||
queryParams.append('endpoint', document.getElementById('endpoint').value);
|
||||
|
||||
// Redirect to the same page with new query parameters
|
||||
window.location.href = '/admin/openrpc/vfs/logs?' + queryParams.toString();
|
||||
});
|
||||
|
||||
// Reset filters when the button is clicked
|
||||
$('#reset-filters').click(function() {
|
||||
// Clear all filter inputs
|
||||
$('#method-filter').val('');
|
||||
$('#status-filter').val('');
|
||||
$('#date-filter').val('');
|
||||
$('#limit-filter').val('50');
|
||||
|
||||
// Redirect to the base URL with only manager and endpoint parameters
|
||||
const queryParams = new URLSearchParams();
|
||||
queryParams.append('manager', document.getElementById('manager').value);
|
||||
queryParams.append('endpoint', document.getElementById('endpoint').value);
|
||||
window.location.href = '/admin/openrpc/vfs/logs?' + queryParams.toString();
|
||||
});
|
||||
});
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
Reference in New Issue
Block a user