...
This commit is contained in:
487
_pkg2_dont_use/heroagent/handlers/job_handlers.go
Normal file
487
_pkg2_dont_use/heroagent/handlers/job_handlers.go
Normal file
@@ -0,0 +1,487 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv" // Added strconv for JobID parsing
|
||||
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/herojobs"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
// RedisClientInterface defines the methods JobHandler needs from a HeroJobs Redis client.
// It abstracts *herojobs.RedisClient so tests can substitute a mock implementation.
type RedisClientInterface interface {
	// StoreJob persists the job's data in Redis.
	StoreJob(job *herojobs.Job) error
	// EnqueueJob pushes the job onto its processing queue in Redis.
	EnqueueJob(job *herojobs.Job) error
	// GetJob fetches a job by ID. The ID is typed interface{} so callers
	// may pass uint32 (or any other representation the client accepts).
	GetJob(jobID interface{}) (*herojobs.Job, error) // Changed jobID type to interface{}
	// ListJobs returns the IDs of jobs queued for the given circle/topic.
	ListJobs(circleID, topic string) ([]uint32, error)
	// QueueSize reports the number of jobs waiting for the given circle/topic.
	QueueSize(circleID, topic string) (int64, error)
	// QueueEmpty removes all jobs from the given circle/topic queue.
	QueueEmpty(circleID, topic string) error
	// herojobs.Job also has Load() and Save() methods, but those are on the Job object itself,
	// not typically part of the client interface unless the client is a facade for all job operations.
}
|
||||
|
||||
// JobHandler handles job-related routes.
type JobHandler struct {
	client RedisClientInterface // Redis-backed job client; interface so tests can inject a mock
	logger *log.Logger          // destination for handler diagnostics
}
|
||||
|
||||
// NewJobHandler creates a new JobHandler
|
||||
func NewJobHandler(redisAddr string, logger *log.Logger) (*JobHandler, error) {
|
||||
redisClient, err := herojobs.NewRedisClient(redisAddr, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create HeroJobs Redis client: %w", err)
|
||||
}
|
||||
// *herojobs.RedisClient must implement RedisClientInterface.
|
||||
// This assignment is valid if *herojobs.RedisClient has all methods of RedisClientInterface.
|
||||
return &JobHandler{
|
||||
client: redisClient,
|
||||
logger: logger,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// RegisterRoutes registers job API routes
|
||||
func (h *JobHandler) RegisterRoutes(app *fiber.App) {
|
||||
// Register common routes to both API and admin groups
|
||||
jobRoutes := func(group fiber.Router) {
|
||||
group.Post("/submit", h.submitJob)
|
||||
group.Get("/get/:id", h.getJob)
|
||||
group.Delete("/delete/:id", h.deleteJob)
|
||||
group.Get("/list", h.listJobs)
|
||||
group.Get("/queue/size", h.queueSize)
|
||||
group.Post("/queue/empty", h.queueEmpty)
|
||||
group.Get("/queue/get", h.queueGet)
|
||||
group.Post("/create", h.createJob)
|
||||
}
|
||||
|
||||
// Apply common routes to API group
|
||||
apiJobs := app.Group("/api/jobs")
|
||||
jobRoutes(apiJobs)
|
||||
|
||||
// Apply common routes to admin group
|
||||
adminJobs := app.Group("/admin/jobs")
|
||||
jobRoutes(adminJobs)
|
||||
}
|
||||
|
||||
// @Summary Submit a job
|
||||
// @Description Submit a new job to the HeroJobs server
|
||||
// @Tags jobs
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param job body herojobs.Job true "Job to submit"
|
||||
// @Success 200 {object} herojobs.Job
|
||||
// @Failure 400 {object} map[string]string
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /api/jobs/submit [post]
|
||||
// @Router /admin/jobs/submit [post]
|
||||
func (h *JobHandler) submitJob(c *fiber.Ctx) error {
|
||||
// Parse job from request body
|
||||
var job herojobs.Job
|
||||
if err := c.BodyParser(&job); err != nil {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to parse job data: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
// Save job to OurDB (this assigns/confirms JobID)
|
||||
if err := job.Save(); err != nil {
|
||||
h.logger.Printf("Failed to save job to OurDB: %v", err)
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to save job: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
// Store job in Redis
|
||||
if err := h.client.StoreJob(&job); err != nil {
|
||||
h.logger.Printf("Failed to store job in Redis: %v", err)
|
||||
// Attempt to roll back or log, but proceed to enqueue if critical
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to store job in Redis: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
// Enqueue job in Redis
|
||||
if err := h.client.EnqueueJob(&job); err != nil {
|
||||
h.logger.Printf("Failed to enqueue job in Redis: %v", err)
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to enqueue job: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(job)
|
||||
}
|
||||
|
||||
// @Summary Get a job
|
||||
// @Description Get a job by ID
|
||||
// @Tags jobs
|
||||
// @Produce json
|
||||
// @Param id path string true "Job ID"
|
||||
// @Success 200 {object} herojobs.Job
|
||||
// @Failure 400 {object} map[string]string
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /api/jobs/get/{id} [get]
|
||||
// @Router /admin/jobs/get/{id} [get]
|
||||
func (h *JobHandler) getJob(c *fiber.Ctx) error {
|
||||
// Get job ID from path parameter
|
||||
jobIDStr := c.Params("id")
|
||||
if jobIDStr == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Job ID is required",
|
||||
})
|
||||
}
|
||||
|
||||
// Convert jobID string to uint32
|
||||
jobID64, err := strconv.ParseUint(jobIDStr, 10, 32)
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Invalid Job ID format: %s. %v", jobIDStr, err),
|
||||
})
|
||||
}
|
||||
jobID := uint32(jobID64)
|
||||
|
||||
// Get job from Redis first
|
||||
job, err := h.client.GetJob(jobID)
|
||||
if err != nil {
|
||||
// If not found in Redis (e.g. redis.Nil or other error), try OurDB
|
||||
h.logger.Printf("Job %d not found in Redis or error: %v. Trying OurDB.", jobID, err)
|
||||
retrievedJob := &herojobs.Job{JobID: jobID}
|
||||
if loadErr := retrievedJob.Load(); loadErr != nil {
|
||||
h.logger.Printf("Failed to load job %d from OurDB: %v", jobID, loadErr)
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to get job %d: %v / %v", jobID, err, loadErr),
|
||||
})
|
||||
}
|
||||
job = retrievedJob // Use the job loaded from OurDB
|
||||
}
|
||||
|
||||
return c.JSON(job)
|
||||
}
|
||||
|
||||
// @Summary Delete a job
|
||||
// @Description Delete a job by ID
|
||||
// @Tags jobs
|
||||
// @Produce json
|
||||
// @Param id path string true "Job ID"
|
||||
// @Success 200 {object} map[string]string
|
||||
// @Failure 400 {object} map[string]string
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /api/jobs/delete/{id} [delete]
|
||||
// @Router /admin/jobs/delete/{id} [delete]
|
||||
func (h *JobHandler) deleteJob(c *fiber.Ctx) error {
|
||||
// Get job ID from path parameter
|
||||
jobIDStr := c.Params("id")
|
||||
if jobIDStr == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Job ID is required",
|
||||
})
|
||||
}
|
||||
|
||||
// Deleting jobs requires removing from OurDB and Redis.
|
||||
// This functionality is not directly provided by RedisClient.DeleteJob
|
||||
// and OurDB job deletion is not specified in README.
|
||||
// For now, returning not implemented.
|
||||
h.logger.Printf("Attempt to delete job %s - not implemented", jobIDStr)
|
||||
return c.Status(fiber.StatusNotImplemented).JSON(fiber.Map{
|
||||
"error": "Job deletion is not implemented",
|
||||
"message": fmt.Sprintf("Job %s deletion requested but not implemented.", jobIDStr),
|
||||
})
|
||||
}
|
||||
|
||||
// @Summary List jobs
|
||||
// @Description List jobs by circle ID and topic
|
||||
// @Tags jobs
|
||||
// @Produce json
|
||||
// @Param circleid query string true "Circle ID"
|
||||
// @Param topic query string true "Topic"
|
||||
// @Success 200 {object} map[string][]string
|
||||
// @Failure 400 {object} map[string]string
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /api/jobs/list [get]
|
||||
// @Router /admin/jobs/list [get]
|
||||
func (h *JobHandler) listJobs(c *fiber.Ctx) error {
|
||||
// Get parameters from query
|
||||
circleID := c.Query("circleid")
|
||||
if circleID == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Circle ID is required",
|
||||
})
|
||||
}
|
||||
|
||||
topic := c.Query("topic")
|
||||
if topic == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Topic is required",
|
||||
})
|
||||
}
|
||||
|
||||
// List jobs
|
||||
jobs, err := h.client.ListJobs(circleID, topic)
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to list jobs: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"status": "success",
|
||||
"jobs": jobs,
|
||||
})
|
||||
}
|
||||
|
||||
// @Summary Get queue size
|
||||
// @Description Get the size of a job queue by circle ID and topic
|
||||
// @Tags jobs
|
||||
// @Produce json
|
||||
// @Param circleid query string true "Circle ID"
|
||||
// @Param topic query string true "Topic"
|
||||
// @Success 200 {object} map[string]int64
|
||||
// @Failure 400 {object} map[string]string
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /api/jobs/queue/size [get]
|
||||
// @Router /admin/jobs/queue/size [get]
|
||||
func (h *JobHandler) queueSize(c *fiber.Ctx) error {
|
||||
// Get parameters from query
|
||||
circleID := c.Query("circleid")
|
||||
if circleID == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Circle ID is required",
|
||||
})
|
||||
}
|
||||
|
||||
topic := c.Query("topic")
|
||||
if topic == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Topic is required",
|
||||
})
|
||||
}
|
||||
|
||||
// Get queue size
|
||||
size, err := h.client.QueueSize(circleID, topic)
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to get queue size: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"status": "success",
|
||||
"size": size,
|
||||
})
|
||||
}
|
||||
|
||||
// @Summary Empty queue
|
||||
// @Description Empty a job queue by circle ID and topic
|
||||
// @Tags jobs
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param body body object true "Queue parameters"
|
||||
// @Success 200 {object} map[string]string
|
||||
// @Failure 400 {object} map[string]string
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /api/jobs/queue/empty [post]
|
||||
// @Router /admin/jobs/queue/empty [post]
|
||||
func (h *JobHandler) queueEmpty(c *fiber.Ctx) error {
|
||||
// Parse parameters from request body
|
||||
var params struct {
|
||||
CircleID string `json:"circleid"`
|
||||
Topic string `json:"topic"`
|
||||
}
|
||||
if err := c.BodyParser(¶ms); err != nil {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to parse parameters: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
if params.CircleID == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Circle ID is required",
|
||||
})
|
||||
}
|
||||
|
||||
if params.Topic == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Topic is required",
|
||||
})
|
||||
}
|
||||
|
||||
// Empty queue
|
||||
if err := h.client.QueueEmpty(params.CircleID, params.Topic); err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to empty queue: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"status": "success",
|
||||
"message": fmt.Sprintf("Queue for circle %s and topic %s emptied successfully", params.CircleID, params.Topic),
|
||||
})
|
||||
}
|
||||
|
||||
// @Summary Get job from queue
|
||||
// @Description Get a job from a queue without removing it
|
||||
// @Tags jobs
|
||||
// @Produce json
|
||||
// @Param circleid query string true "Circle ID"
|
||||
// @Param topic query string true "Topic"
|
||||
// @Success 200 {object} herojobs.Job
|
||||
// @Failure 400 {object} map[string]string
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /api/jobs/queue/get [get]
|
||||
// @Router /admin/jobs/queue/get [get]
|
||||
func (h *JobHandler) queueGet(c *fiber.Ctx) error {
|
||||
// Get parameters from query
|
||||
circleID := c.Query("circleid")
|
||||
if circleID == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Circle ID is required",
|
||||
})
|
||||
}
|
||||
|
||||
topic := c.Query("topic")
|
||||
if topic == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Topic is required",
|
||||
})
|
||||
}
|
||||
|
||||
// Get list of job IDs (uint32) from the queue (non-destructive)
|
||||
jobIDs, err := h.client.ListJobs(circleID, topic)
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to list jobs in queue: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
if len(jobIDs) == 0 {
|
||||
return c.Status(fiber.StatusNotFound).JSON(fiber.Map{
|
||||
"error": "Queue is empty or no jobs found",
|
||||
})
|
||||
}
|
||||
|
||||
// Take the first job ID from the list (it's already uint32)
|
||||
jobIDToFetch := jobIDs[0]
|
||||
|
||||
// Get the actual job details using the ID
|
||||
job, err := h.client.GetJob(jobIDToFetch)
|
||||
if err != nil {
|
||||
// If not found in Redis (e.g. redis.Nil or other error), try OurDB
|
||||
h.logger.Printf("Job %d (from queue list) not found in Redis or error: %v. Trying OurDB.", jobIDToFetch, err)
|
||||
retrievedJob := &herojobs.Job{JobID: jobIDToFetch} // Ensure CircleID and Topic are set if Load needs them
|
||||
retrievedJob.CircleID = circleID // Needed for Load if path depends on it
|
||||
retrievedJob.Topic = topic // Needed for Load if path depends on it
|
||||
if loadErr := retrievedJob.Load(); loadErr != nil {
|
||||
h.logger.Printf("Failed to load job %d from OurDB: %v", jobIDToFetch, loadErr)
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to get job %d from queue (Redis err: %v / OurDB err: %v)", jobIDToFetch, err, loadErr),
|
||||
})
|
||||
}
|
||||
job = retrievedJob // Use the job loaded from OurDB
|
||||
}
|
||||
|
||||
return c.JSON(job)
|
||||
}
|
||||
|
||||
// @Summary Create job
|
||||
// @Description Create a new job with the given parameters
|
||||
// @Tags jobs
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param body body object true "Job parameters"
|
||||
// @Success 200 {object} herojobs.Job
|
||||
// @Failure 400 {object} map[string]string
|
||||
// @Failure 500 {object} map[string]string
|
||||
// @Router /api/jobs/create [post]
|
||||
// @Router /admin/jobs/create [post]
|
||||
func (h *JobHandler) createJob(c *fiber.Ctx) error {
|
||||
// Parse parameters from request body
|
||||
var reqBody struct {
|
||||
CircleID string `json:"circleid"`
|
||||
Topic string `json:"topic"`
|
||||
SessionKey string `json:"sessionkey"`
|
||||
Params string `json:"params"`
|
||||
ParamsType string `json:"paramstype"`
|
||||
Timeout int64 `json:"timeout"` // Optional: allow timeout override
|
||||
Log bool `json:"log"` // Optional: allow log enabling
|
||||
}
|
||||
if err := c.BodyParser(&reqBody); err != nil {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to parse parameters: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
if reqBody.CircleID == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Circle ID is required",
|
||||
})
|
||||
}
|
||||
if reqBody.Topic == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Topic is required",
|
||||
})
|
||||
}
|
||||
if reqBody.Params == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "Params are required",
|
||||
})
|
||||
}
|
||||
if reqBody.ParamsType == "" {
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": "ParamsType is required",
|
||||
})
|
||||
}
|
||||
|
||||
// Create a new job instance
|
||||
job := herojobs.NewJob() // Initializes with defaults
|
||||
job.CircleID = reqBody.CircleID
|
||||
job.Topic = reqBody.Topic
|
||||
job.SessionKey = reqBody.SessionKey
|
||||
job.Params = reqBody.Params
|
||||
|
||||
// Convert ParamsType string to herojobs.ParamsType
|
||||
switch herojobs.ParamsType(reqBody.ParamsType) {
|
||||
case herojobs.ParamsTypeHeroScript:
|
||||
job.ParamsType = herojobs.ParamsTypeHeroScript
|
||||
case herojobs.ParamsTypeRhaiScript:
|
||||
job.ParamsType = herojobs.ParamsTypeRhaiScript
|
||||
case herojobs.ParamsTypeOpenRPC:
|
||||
job.ParamsType = herojobs.ParamsTypeOpenRPC
|
||||
case herojobs.ParamsTypeAI:
|
||||
job.ParamsType = herojobs.ParamsTypeAI
|
||||
default:
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Invalid ParamsType: %s", reqBody.ParamsType),
|
||||
})
|
||||
}
|
||||
|
||||
if reqBody.Timeout > 0 {
|
||||
job.Timeout = reqBody.Timeout
|
||||
}
|
||||
job.Log = reqBody.Log
|
||||
|
||||
// Save job to OurDB (this assigns JobID)
|
||||
if err := job.Save(); err != nil {
|
||||
h.logger.Printf("Failed to save new job to OurDB: %v", err)
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to save new job: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
// Store job in Redis
|
||||
if err := h.client.StoreJob(job); err != nil {
|
||||
h.logger.Printf("Failed to store new job in Redis: %v", err)
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to store new job in Redis: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
// Enqueue job in Redis
|
||||
if err := h.client.EnqueueJob(job); err != nil {
|
||||
h.logger.Printf("Failed to enqueue new job in Redis: %v", err)
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to enqueue new job: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(job)
|
||||
}
|
572
_pkg2_dont_use/heroagent/handlers/job_handlers_test.go
Normal file
572
_pkg2_dont_use/heroagent/handlers/job_handlers_test.go
Normal file
@@ -0,0 +1,572 @@
|
||||
package handlers
|
||||
|
||||
import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"net/http"
	"net/http/httptest"
	"testing"

	"git.ourworld.tf/herocode/heroagent/pkg/herojobs"
	"github.com/gofiber/fiber/v2"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)
|
||||
|
||||
// MockRedisClient is a mock implementation of the RedisClientInterface,
// backed by testify's mock.Mock for expectation setup and verification.
type MockRedisClient struct {
	mock.Mock
}
|
||||
|
||||
// StoreJob mocks the StoreJob method
|
||||
func (m *MockRedisClient) StoreJob(job *herojobs.Job) error {
|
||||
args := m.Called(job)
|
||||
return args.Error(0)
|
||||
}
|
||||
|
||||
// EnqueueJob mocks the EnqueueJob method
|
||||
func (m *MockRedisClient) EnqueueJob(job *herojobs.Job) error {
|
||||
args := m.Called(job)
|
||||
return args.Error(0)
|
||||
}
|
||||
|
||||
// GetJob mocks the GetJob method
|
||||
func (m *MockRedisClient) GetJob(jobID interface{}) (*herojobs.Job, error) { // jobID is interface{}
|
||||
args := m.Called(jobID)
|
||||
if args.Get(0) == nil {
|
||||
return nil, args.Error(1)
|
||||
}
|
||||
return args.Get(0).(*herojobs.Job), args.Error(1)
|
||||
}
|
||||
|
||||
// ListJobs mocks the ListJobs method
|
||||
func (m *MockRedisClient) ListJobs(circleID, topic string) ([]uint32, error) { // Returns []uint32
|
||||
args := m.Called(circleID, topic)
|
||||
if args.Get(0) == nil {
|
||||
return nil, args.Error(1)
|
||||
}
|
||||
return args.Get(0).([]uint32), args.Error(1)
|
||||
}
|
||||
|
||||
// QueueSize mocks the QueueSize method
|
||||
func (m *MockRedisClient) QueueSize(circleID, topic string) (int64, error) {
|
||||
args := m.Called(circleID, topic)
|
||||
// Ensure Get(0) is not nil before type assertion if it can be nil in some error cases
|
||||
if args.Get(0) == nil && args.Error(1) != nil { // If error is set, result might be nil
|
||||
return 0, args.Error(1)
|
||||
}
|
||||
return args.Get(0).(int64), args.Error(1)
|
||||
}
|
||||
|
||||
// QueueEmpty mocks the QueueEmpty method
|
||||
func (m *MockRedisClient) QueueEmpty(circleID, topic string) error {
|
||||
args := m.Called(circleID, topic)
|
||||
return args.Error(0)
|
||||
}
|
||||
|
||||
// setupTest initializes a test environment with a mock client
|
||||
func setupTest() (*JobHandler, *MockRedisClient, *fiber.App) {
|
||||
mockClient := new(MockRedisClient)
|
||||
handler := &JobHandler{
|
||||
client: mockClient, // Assign the mock that implements RedisClientInterface
|
||||
}
|
||||
|
||||
app := fiber.New()
|
||||
|
||||
// Register routes (ensure these match the actual routes in job_handlers.go)
|
||||
apiJobs := app.Group("/api/jobs") // Assuming routes are under /api/jobs
|
||||
apiJobs.Post("/submit", handler.submitJob)
|
||||
apiJobs.Get("/get/:id", handler.getJob) // :id as per job_handlers.go
|
||||
apiJobs.Delete("/delete/:id", handler.deleteJob) // :id as per job_handlers.go
|
||||
apiJobs.Get("/list", handler.listJobs)
|
||||
apiJobs.Get("/queue/size", handler.queueSize)
|
||||
apiJobs.Post("/queue/empty", handler.queueEmpty)
|
||||
apiJobs.Get("/queue/get", handler.queueGet)
|
||||
apiJobs.Post("/create", handler.createJob)
|
||||
|
||||
// If admin routes are also tested, they need to be registered here too
|
||||
// adminJobs := app.Group("/admin/jobs")
|
||||
// jobRoutes(adminJobs) // if using the same handler instance
|
||||
|
||||
return handler, mockClient, app
|
||||
}
|
||||
|
||||
// createTestRequest builds an httptest request with a JSON content type.
// The error return is kept for call-site compatibility; it is always nil.
func createTestRequest(method, path string, body io.Reader) (*http.Request, error) {
	r := httptest.NewRequest(method, path, body)
	r.Header.Set("Content-Type", "application/json")
	return r, nil
}
|
||||
|
||||
// TestQueueEmpty tests the queueEmpty handler: validation failures (missing
// circle ID / topic), a downstream QueueEmpty error, and the success path.
// Each case asserts the exact HTTP status and JSON body.
func TestQueueEmpty(t *testing.T) {
	// Test cases
	tests := []struct {
		name           string
		circleID       string
		topic          string
		emptyError     error // error the mocked QueueEmpty should return
		expectedStatus int
		expectedBody   string
	}{
		{
			name:           "Success",
			circleID:       "test-circle",
			topic:          "test-topic",
			emptyError:     nil,
			expectedStatus: fiber.StatusOK,
			expectedBody:   `{"status":"success","message":"Queue for circle test-circle and topic test-topic emptied successfully"}`,
		},
		// Removed "Connection Error" test case as Connect is no longer directly called per op
		{
			name:           "Empty Error",
			circleID:       "test-circle",
			topic:          "test-topic",
			emptyError:     errors.New("empty error"),
			expectedStatus: fiber.StatusInternalServerError,
			expectedBody:   `{"error":"Failed to empty queue: empty error"}`,
		},
		{
			name:           "Empty Circle ID",
			circleID:       "",
			topic:          "test-topic",
			emptyError:     nil,
			expectedStatus: fiber.StatusBadRequest,
			expectedBody:   `{"error":"Circle ID is required"}`,
		},
		{
			name:           "Empty Topic",
			circleID:       "test-circle",
			topic:          "",
			emptyError:     nil,
			expectedStatus: fiber.StatusBadRequest,
			expectedBody:   `{"error":"Topic is required"}`,
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			// Create a new mock client for each test and setup app
			_, mockClient, app := setupTest() // Use setupTest to get handler with mock

			// Setup mock expectations
			if tc.circleID != "" && tc.topic != "" { // Only expect call if params are valid
				mockClient.On("QueueEmpty", tc.circleID, tc.topic).Return(tc.emptyError)
			}

			// Create request body
			reqBody := map[string]string{
				"circleid": tc.circleID,
				"topic":    tc.topic,
			}
			reqBodyBytes, err := json.Marshal(reqBody)
			assert.NoError(t, err)

			// Create test request
			req, err := createTestRequest(http.MethodPost, "/api/jobs/queue/empty", bytes.NewReader(reqBodyBytes))
			assert.NoError(t, err)
			req.Header.Set("Content-Type", "application/json")

			// Perform the request
			resp, err := app.Test(req)
			assert.NoError(t, err)

			// Check status code
			assert.Equal(t, tc.expectedStatus, resp.StatusCode)

			// Check response body
			body, err := io.ReadAll(resp.Body)
			assert.NoError(t, err)
			assert.JSONEq(t, tc.expectedBody, string(body))

			// Verify that all expectations were met
			mockClient.AssertExpectations(t)
		})
	}
}
|
||||
|
||||
// TestQueueGet tests the queueGet handler: parameter validation, ListJobs
// failure, an empty queue (404), a GetJob failure with the OurDB fallback
// also failing, and the success path. Each case asserts the exact HTTP
// status and JSON body.
func TestQueueGet(t *testing.T) {
	// Create a test job
	testJob := herojobs.NewJob()
	testJob.JobID = 10 // This will be a number in JSON
	testJob.CircleID = "test-circle"
	testJob.Topic = "test-topic"
	testJob.Params = "some script"
	testJob.ParamsType = herojobs.ParamsTypeHeroScript
	testJob.Status = herojobs.JobStatusNew

	// Test cases
	tests := []struct {
		name           string
		circleID       string
		topic          string
		listJobsError  error          // error the mocked ListJobs should return
		listJobsResp   []uint32       // job IDs the mocked ListJobs should return
		getJobError    error          // error the mocked GetJob should return
		getJobResp     *herojobs.Job  // job the mocked GetJob should return
		expectedStatus int
		expectedBody   string // This will need to be updated to match the actual job structure
	}{
		{
			name:           "Success",
			circleID:       "test-circle",
			topic:          "test-topic",
			listJobsError:  nil,
			listJobsResp:   []uint32{10},
			getJobError:    nil,
			getJobResp:     testJob,
			expectedStatus: fiber.StatusOK,
			expectedBody:   `{"jobid":10,"circleid":"test-circle","topic":"test-topic","params":"some script","paramstype":"HeroScript","status":"new","sessionkey":"","result":"","error":"","timeout":60,"log":false,"timescheduled":0,"timestart":0,"timeend":0}`,
		},
		// Removed "Connection Error"
		{
			name:           "ListJobs Error",
			circleID:       "test-circle",
			topic:          "test-topic",
			listJobsError:  errors.New("list error"),
			listJobsResp:   nil,
			getJobError:    nil, // Not reached
			getJobResp:     nil, // Not reached
			expectedStatus: fiber.StatusInternalServerError,
			expectedBody:   `{"error":"Failed to list jobs in queue: list error"}`,
		},
		{
			name:           "GetJob Error after ListJobs success",
			circleID:       "test-circle",
			topic:          "test-topic",
			listJobsError:  nil,
			listJobsResp:   []uint32{10},
			getJobError:    errors.New("get error"),
			getJobResp:     nil,
			expectedStatus: fiber.StatusInternalServerError, // Or based on how GetJob error is handled (e.g. fallback to OurDB)
			// The error message might be more complex if OurDB load is also attempted and fails
			expectedBody: `{"error":"Failed to get job 10 from queue (Redis err: get error / OurDB err: record not found)"}`, // Adjusted expected error
		},
		{
			name:           "Queue Empty (ListJobs returns empty)",
			circleID:       "test-circle",
			topic:          "test-topic",
			listJobsError:  nil,
			listJobsResp:   []uint32{}, // Empty list
			getJobError:    nil,
			getJobResp:     nil,
			expectedStatus: fiber.StatusNotFound,
			expectedBody:   `{"error":"Queue is empty or no jobs found"}`,
		},
		{
			name:           "Empty Circle ID",
			circleID:       "",
			topic:          "test-topic",
			listJobsError:  nil,
			listJobsResp:   nil,
			getJobError:    nil,
			getJobResp:     nil,
			expectedStatus: fiber.StatusBadRequest,
			expectedBody:   `{"error":"Circle ID is required"}`,
		},
		{
			name:           "Empty Topic",
			circleID:       "test-circle",
			topic:          "",
			listJobsError:  nil,
			listJobsResp:   nil,
			getJobError:    nil,
			getJobResp:     nil,
			expectedStatus: fiber.StatusBadRequest,
			expectedBody:   `{"error":"Topic is required"}`,
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			// Create a new mock client for each test and setup app
			_, mockClient, app := setupTest()

			// Setup mock expectations
			if tc.circleID != "" && tc.topic != "" {
				mockClient.On("ListJobs", tc.circleID, tc.topic).Return(tc.listJobsResp, tc.listJobsError)
				if tc.listJobsError == nil && len(tc.listJobsResp) > 0 {
					// Expect GetJob to be called with the first ID from listJobsResp
					// The handler passes uint32 to client.GetJob, which matches interface{}
					mockClient.On("GetJob", tc.listJobsResp[0]).Return(tc.getJobResp, tc.getJobError).Maybe()
					// If GetJob from Redis fails, a Load from OurDB is attempted.
					// We are not mocking job.Load() here as it's on the job object.
					// The error message in the test case reflects this potential dual failure.
				}
			}

			// Create test request
			path := fmt.Sprintf("/api/jobs/queue/get?circleid=%s&topic=%s", tc.circleID, tc.topic)
			req, err := createTestRequest(http.MethodGet, path, nil)
			assert.NoError(t, err)

			// Perform the request
			resp, err := app.Test(req)
			assert.NoError(t, err)

			// Check status code
			assert.Equal(t, tc.expectedStatus, resp.StatusCode)

			// Check response body
			body, err := io.ReadAll(resp.Body)
			assert.NoError(t, err)
			assert.JSONEq(t, tc.expectedBody, string(body))

			// Verify that all expectations were met
			mockClient.AssertExpectations(t)
		})
	}
}
|
||||
|
||||
// TestCreateJob tests the createJob handler
|
||||
func TestCreateJob(t *testing.T) {
|
||||
// Test cases
|
||||
createdJob := herojobs.NewJob()
|
||||
createdJob.JobID = 10 // Assuming Save will populate this; for mock, we set it
|
||||
createdJob.CircleID = "test-circle"
|
||||
createdJob.Topic = "test-topic"
|
||||
createdJob.SessionKey = "test-key"
|
||||
createdJob.Params = "test-params"
|
||||
createdJob.ParamsType = herojobs.ParamsTypeHeroScript // Match "HeroScript" string
|
||||
createdJob.Status = herojobs.JobStatusNew // Default status after NewJob and Save
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
reqBody map[string]interface{} // Use map for flexibility
|
||||
storeError error
|
||||
enqueueError error
|
||||
expectedStatus int
|
||||
expectedBody string // Will be the createdJob marshaled
|
||||
}{
|
||||
{
|
||||
name: "Success",
|
||||
reqBody: map[string]interface{}{
|
||||
"circleid": "test-circle",
|
||||
"topic": "test-topic",
|
||||
"sessionkey": "test-key",
|
||||
"params": "test-params",
|
||||
"paramstype": "HeroScript",
|
||||
"timeout": 30,
|
||||
"log": true,
|
||||
},
|
||||
storeError: nil,
|
||||
enqueueError: nil,
|
||||
expectedStatus: fiber.StatusOK,
|
||||
// Expected body should match the 'createdJob' structure after Save, Store, Enqueue
|
||||
// JobID is assigned by Save(), which we are not mocking here.
|
||||
// The handler returns the job object.
|
||||
// For the test, we assume Save() works and populates JobID if it were a real DB.
|
||||
// The mock will return the job passed to it.
|
||||
expectedBody: `{"jobid":0,"circleid":"test-circle","topic":"test-topic","params":"test-params","paramstype":"HeroScript","status":"new","sessionkey":"test-key","result":"","error":"","timeout":30,"log":true,"timescheduled":0,"timestart":0,"timeend":0}`,
|
||||
},
|
||||
// Removed "Connection Error"
|
||||
{
|
||||
name: "StoreJob Error",
|
||||
reqBody: map[string]interface{}{
|
||||
"circleid": "test-circle", "topic": "test-topic", "params": "p", "paramstype": "HeroScript",
|
||||
},
|
||||
storeError: errors.New("store error"),
|
||||
enqueueError: nil,
|
||||
expectedStatus: fiber.StatusInternalServerError,
|
||||
expectedBody: `{"error":"Failed to store new job in Redis: store error"}`,
|
||||
},
|
||||
{
|
||||
name: "EnqueueJob Error",
|
||||
reqBody: map[string]interface{}{
|
||||
"circleid": "test-circle", "topic": "test-topic", "params": "p", "paramstype": "HeroScript",
|
||||
},
|
||||
storeError: nil,
|
||||
enqueueError: errors.New("enqueue error"),
|
||||
expectedStatus: fiber.StatusInternalServerError,
|
||||
expectedBody: `{"error":"Failed to enqueue new job in Redis: enqueue error"}`,
|
||||
},
|
||||
{
|
||||
name: "Empty Circle ID",
|
||||
reqBody: map[string]interface{}{
|
||||
"circleid": "", "topic": "test-topic", "params": "p", "paramstype": "HeroScript",
|
||||
},
|
||||
expectedStatus: fiber.StatusBadRequest,
|
||||
expectedBody: `{"error":"Circle ID is required"}`,
|
||||
},
|
||||
{
|
||||
name: "Empty Topic",
|
||||
reqBody: map[string]interface{}{
|
||||
"circleid": "c", "topic": "", "params": "p", "paramstype": "HeroScript",
|
||||
},
|
||||
expectedStatus: fiber.StatusBadRequest,
|
||||
expectedBody: `{"error":"Topic is required"}`,
|
||||
},
|
||||
{
|
||||
name: "Empty Params",
|
||||
reqBody: map[string]interface{}{
|
||||
"circleid": "c", "topic": "t", "params": "", "paramstype": "HeroScript",
|
||||
},
|
||||
expectedStatus: fiber.StatusBadRequest,
|
||||
expectedBody: `{"error":"Params are required"}`,
|
||||
},
|
||||
{
|
||||
name: "Empty ParamsType",
|
||||
reqBody: map[string]interface{}{
|
||||
"circleid": "c", "topic": "t", "params": "p", "paramstype": "",
|
||||
},
|
||||
expectedStatus: fiber.StatusBadRequest,
|
||||
expectedBody: `{"error":"ParamsType is required"}`,
|
||||
},
|
||||
{
|
||||
name: "Invalid ParamsType",
|
||||
reqBody: map[string]interface{}{
|
||||
"circleid": "c", "topic": "t", "params": "p", "paramstype": "InvalidType",
|
||||
},
|
||||
expectedStatus: fiber.StatusBadRequest,
|
||||
expectedBody: `{"error":"Invalid ParamsType: InvalidType"}`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
_, mockClient, app := setupTest()
|
||||
|
||||
// Setup mock expectations
|
||||
// job.Save() is called before client interactions. We assume it succeeds for these tests.
|
||||
// The mock will be called with a job object. We use mock.AnythingOfType for the job
|
||||
// because the JobID might be populated by Save() in a real scenario, making exact match hard.
|
||||
if tc.reqBody["circleid"] != "" && tc.reqBody["topic"] != "" &&
|
||||
tc.reqBody["params"] != "" && tc.reqBody["paramstype"] != "" &&
|
||||
herojobs.ParamsType(tc.reqBody["paramstype"].(string)) != "" { // Basic validation check
|
||||
|
||||
// We expect StoreJob to be called with a *herojobs.Job.
|
||||
// The actual JobID is set by job.Save() which is not mocked here.
|
||||
// So we use mock.AnythingOfType to match the argument.
|
||||
mockClient.On("StoreJob", mock.AnythingOfType("*herojobs.Job")).Return(tc.storeError).Once().Maybe()
|
||||
|
||||
if tc.storeError == nil {
|
||||
mockClient.On("EnqueueJob", mock.AnythingOfType("*herojobs.Job")).Return(tc.enqueueError).Once().Maybe()
|
||||
}
|
||||
}
|
||||
|
||||
reqBodyBytes, err := json.Marshal(tc.reqBody)
|
||||
assert.NoError(t, err)
|
||||
|
||||
req, err := createTestRequest(http.MethodPost, "/api/jobs/create", bytes.NewReader(reqBodyBytes)) // Use /api/jobs/create
|
||||
assert.NoError(t, err)
|
||||
// Content-Type is set by createTestRequest
|
||||
|
||||
// Perform the request
|
||||
resp, err := app.Test(req)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Check status code
|
||||
assert.Equal(t, tc.expectedStatus, resp.StatusCode)
|
||||
|
||||
// Check response body
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
assert.NoError(t, err)
|
||||
assert.JSONEq(t, tc.expectedBody, string(body))
|
||||
|
||||
// Verify that all expectations were met
|
||||
mockClient.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestSubmitJob tests the submitJob handler
|
||||
func TestSubmitJob(t *testing.T) {
|
||||
// Test cases
|
||||
submittedJob := herojobs.NewJob()
|
||||
submittedJob.JobID = 10 // Assume Save populates this
|
||||
submittedJob.CircleID = "test-circle"
|
||||
submittedJob.Topic = "test-topic"
|
||||
submittedJob.Params = "submitted params"
|
||||
submittedJob.ParamsType = herojobs.ParamsTypeHeroScript
|
||||
submittedJob.Status = herojobs.JobStatusNew
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
jobToSubmit *herojobs.Job // This is the job in the request body
|
||||
storeError error
|
||||
enqueueError error
|
||||
expectedStatus int
|
||||
expectedBody string // Will be the jobToSubmit marshaled (after potential Save)
|
||||
}{
|
||||
{
|
||||
name: "Success",
|
||||
jobToSubmit: submittedJob,
|
||||
storeError: nil,
|
||||
enqueueError: nil,
|
||||
expectedStatus: fiber.StatusOK,
|
||||
// The handler returns the job object from the request after Save(), Store(), Enqueue()
|
||||
// For the mock, the JobID from jobToSubmit will be used.
|
||||
expectedBody: `{"jobid":10,"circleid":"test-circle","topic":"test-topic","params":"submitted params","paramstype":"HeroScript","status":"new","sessionkey":"","result":"","error":"","timeout":60,"log":false,"timescheduled":0,"timestart":0,"timeend":0}`,
|
||||
},
|
||||
// Removed "Connection Error"
|
||||
{
|
||||
name: "StoreJob Error",
|
||||
jobToSubmit: submittedJob,
|
||||
storeError: errors.New("store error"),
|
||||
enqueueError: nil,
|
||||
expectedStatus: fiber.StatusInternalServerError,
|
||||
expectedBody: `{"error":"Failed to store job in Redis: store error"}`,
|
||||
},
|
||||
{
|
||||
name: "EnqueueJob Error",
|
||||
jobToSubmit: submittedJob,
|
||||
storeError: nil,
|
||||
enqueueError: errors.New("enqueue error"),
|
||||
expectedStatus: fiber.StatusInternalServerError,
|
||||
expectedBody: `{"error":"Failed to enqueue job: enqueue error"}`,
|
||||
},
|
||||
{
|
||||
name: "Empty Job in request (parsing error)",
|
||||
jobToSubmit: nil, // Simulates empty or malformed request body
|
||||
expectedStatus: fiber.StatusBadRequest,
|
||||
expectedBody: `{"error":"Failed to parse job data: unexpected end of JSON input"}`, // Or similar based on actual parsing
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
_, mockClient, app := setupTest()
|
||||
|
||||
// Setup mock expectations
|
||||
// job.Save() is called before client interactions.
|
||||
if tc.jobToSubmit != nil { // If job is parsable from request
|
||||
// We expect StoreJob to be called with the job from the request.
|
||||
// The JobID might be modified by Save() in a real scenario.
|
||||
mockClient.On("StoreJob", tc.jobToSubmit).Return(tc.storeError).Once().Maybe()
|
||||
if tc.storeError == nil {
|
||||
mockClient.On("EnqueueJob", tc.jobToSubmit).Return(tc.enqueueError).Once().Maybe()
|
||||
}
|
||||
}
|
||||
|
||||
var reqBodyBytes []byte
|
||||
var err error
|
||||
if tc.jobToSubmit != nil {
|
||||
reqBodyBytes, err = json.Marshal(tc.jobToSubmit)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
req, err := createTestRequest(http.MethodPost, "/api/jobs/submit", bytes.NewReader(reqBodyBytes)) // Use /api/jobs/submit
|
||||
assert.NoError(t, err)
|
||||
// Content-Type is set by createTestRequest
|
||||
|
||||
// Perform the request
|
||||
resp, err := app.Test(req)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Check status code
|
||||
assert.Equal(t, tc.expectedStatus, resp.StatusCode)
|
||||
|
||||
// Check response body
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
assert.NoError(t, err)
|
||||
assert.JSONEq(t, tc.expectedBody, string(body))
|
||||
|
||||
// Verify that all expectations were met
|
||||
mockClient.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
}
|
554
_pkg2_dont_use/heroagent/handlers/log_handlers.go
Normal file
554
_pkg2_dont_use/heroagent/handlers/log_handlers.go
Normal file
@@ -0,0 +1,554 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/logger"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
// LogHandler handles log-related routes. It owns one logger per log
// source; each logger reads from its own subdirectory of logBasePath
// (see NewLogHandler for the exact directory layout).
type LogHandler struct {
	systemLogger  *logger.Logger // reads <logBasePath>/system
	serviceLogger *logger.Logger // reads <logBasePath>/services
	jobLogger     *logger.Logger // reads <logBasePath>/jobs
	processLogger *logger.Logger // reads <logBasePath>/processes
	logBasePath   string         // base directory all per-type paths derive from
}
|
||||
|
||||
// NewLogHandler creates a new LogHandler
|
||||
func NewLogHandler(logPath string) (*LogHandler, error) {
|
||||
// Create base directories for different log types
|
||||
systemLogPath := filepath.Join(logPath, "system")
|
||||
serviceLogPath := filepath.Join(logPath, "services")
|
||||
jobLogPath := filepath.Join(logPath, "jobs")
|
||||
processLogPath := filepath.Join(logPath, "processes")
|
||||
|
||||
// Create logger instances for each type
|
||||
systemLogger, err := logger.New(systemLogPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create system logger: %w", err)
|
||||
}
|
||||
|
||||
serviceLogger, err := logger.New(serviceLogPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create service logger: %w", err)
|
||||
}
|
||||
|
||||
jobLogger, err := logger.New(jobLogPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create job logger: %w", err)
|
||||
}
|
||||
|
||||
processLogger, err := logger.New(processLogPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create process logger: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Log handler created successfully with paths:\n System: %s\n Services: %s\n Jobs: %s\n Processes: %s\n",
|
||||
systemLogPath, serviceLogPath, jobLogPath, processLogPath)
|
||||
|
||||
return &LogHandler{
|
||||
systemLogger: systemLogger,
|
||||
serviceLogger: serviceLogger,
|
||||
jobLogger: jobLogger,
|
||||
processLogger: processLogger,
|
||||
logBasePath: logPath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// LogType represents the type of logs to retrieve. Values are matched
// against the "log_type" query parameter of the log endpoints.
type LogType string

// Log source selectors accepted via the "log_type" query parameter.
const (
	LogTypeSystem  LogType = "system"
	LogTypeService LogType = "service"
	LogTypeJob     LogType = "job"
	LogTypeProcess LogType = "process"
	LogTypeAll     LogType = "all" // Special type to retrieve logs from all sources
)
|
||||
|
||||
// GetLogs renders the logs page with logs content
|
||||
func (h *LogHandler) GetLogs(c *fiber.Ctx) error {
|
||||
// Check which logger to use based on the log type parameter
|
||||
logTypeParam := c.Query("log_type", string(LogTypeSystem))
|
||||
|
||||
// Parse query parameters
|
||||
category := c.Query("category", "")
|
||||
logItemType := parseLogType(c.Query("type", ""))
|
||||
maxItems := c.QueryInt("max_items", 100)
|
||||
page := c.QueryInt("page", 1)
|
||||
itemsPerPage := 20 // Default items per page
|
||||
|
||||
// Parse time range
|
||||
fromTime := parseTimeParam(c.Query("from", ""))
|
||||
toTime := parseTimeParam(c.Query("to", ""))
|
||||
|
||||
// Create search arguments
|
||||
searchArgs := logger.SearchArgs{
|
||||
Category: category,
|
||||
LogType: logItemType,
|
||||
MaxItems: maxItems,
|
||||
}
|
||||
|
||||
if !fromTime.IsZero() {
|
||||
searchArgs.TimestampFrom = &fromTime
|
||||
}
|
||||
|
||||
if !toTime.IsZero() {
|
||||
searchArgs.TimestampTo = &toTime
|
||||
}
|
||||
|
||||
// Variables for logs and error
|
||||
var logs []logger.LogItem
|
||||
var err error
|
||||
var logTypeTitle string
|
||||
|
||||
// Check if we want to merge logs from all sources
|
||||
if LogType(logTypeParam) == LogTypeAll {
|
||||
// Get merged logs from all loggers
|
||||
logs, err = h.getMergedLogs(searchArgs)
|
||||
logTypeTitle = "All Logs"
|
||||
} else {
|
||||
// Select the appropriate logger based on the log type
|
||||
var selectedLogger *logger.Logger
|
||||
|
||||
switch LogType(logTypeParam) {
|
||||
case LogTypeService:
|
||||
selectedLogger = h.serviceLogger
|
||||
logTypeTitle = "Service Logs"
|
||||
case LogTypeJob:
|
||||
selectedLogger = h.jobLogger
|
||||
logTypeTitle = "Job Logs"
|
||||
case LogTypeProcess:
|
||||
selectedLogger = h.processLogger
|
||||
logTypeTitle = "Process Logs"
|
||||
default:
|
||||
selectedLogger = h.systemLogger
|
||||
logTypeTitle = "System Logs"
|
||||
}
|
||||
|
||||
// Check if the selected logger is properly initialized
|
||||
if selectedLogger == nil {
|
||||
return c.Render("admin/system/logs", fiber.Map{
|
||||
"title": logTypeTitle,
|
||||
"error": "Logger not initialized",
|
||||
"logTypes": []LogType{LogTypeAll, LogTypeSystem, LogTypeService, LogTypeJob, LogTypeProcess},
|
||||
"selectedLogType": logTypeParam,
|
||||
})
|
||||
}
|
||||
|
||||
// Search for logs using the selected logger
|
||||
logs, err = selectedLogger.Search(searchArgs)
|
||||
}
|
||||
|
||||
// Handle search error
|
||||
if err != nil {
|
||||
return c.Render("admin/system/logs", fiber.Map{
|
||||
"title": logTypeTitle,
|
||||
"error": err.Error(),
|
||||
"logTypes": []LogType{LogTypeAll, LogTypeSystem, LogTypeService, LogTypeJob, LogTypeProcess},
|
||||
"selectedLogType": logTypeParam,
|
||||
})
|
||||
}
|
||||
|
||||
// Calculate total pages
|
||||
totalLogs := len(logs)
|
||||
totalPages := (totalLogs + itemsPerPage - 1) / itemsPerPage
|
||||
|
||||
// Apply pagination
|
||||
startIndex := (page - 1) * itemsPerPage
|
||||
endIndex := startIndex + itemsPerPage
|
||||
if endIndex > totalLogs {
|
||||
endIndex = totalLogs
|
||||
}
|
||||
|
||||
// Slice logs for current page
|
||||
pagedLogs := logs
|
||||
if startIndex < totalLogs {
|
||||
pagedLogs = logs[startIndex:endIndex]
|
||||
} else {
|
||||
pagedLogs = []logger.LogItem{}
|
||||
}
|
||||
|
||||
// Convert logs to a format suitable for the UI
|
||||
formattedLogs := make([]fiber.Map, 0, len(pagedLogs))
|
||||
for _, log := range pagedLogs {
|
||||
logTypeStr := "INFO"
|
||||
logTypeClass := "log-info"
|
||||
if log.LogType == logger.LogTypeError {
|
||||
logTypeStr = "ERROR"
|
||||
logTypeClass = "log-error"
|
||||
}
|
||||
|
||||
formattedLogs = append(formattedLogs, fiber.Map{
|
||||
"timestamp": log.Timestamp.Format("2006-01-02T15:04:05"),
|
||||
"category": log.Category,
|
||||
"message": log.Message,
|
||||
"type": logTypeStr,
|
||||
"typeClass": logTypeClass,
|
||||
})
|
||||
}
|
||||
|
||||
return c.Render("admin/system/logs", fiber.Map{
|
||||
"title": logTypeTitle,
|
||||
"logTypes": []LogType{LogTypeAll, LogTypeSystem, LogTypeService, LogTypeJob, LogTypeProcess},
|
||||
"selectedLogType": logTypeParam,
|
||||
"logs": formattedLogs,
|
||||
"total": totalLogs,
|
||||
"showing": len(formattedLogs),
|
||||
"page": page,
|
||||
"totalPages": totalPages,
|
||||
"categoryParam": category,
|
||||
"typeParam": c.Query("type", ""),
|
||||
"fromParam": c.Query("from", ""),
|
||||
"toParam": c.Query("to", ""),
|
||||
})
|
||||
}
|
||||
|
||||
// GetLogsAPI returns logs in JSON format for API consumption
|
||||
func (h *LogHandler) GetLogsAPI(c *fiber.Ctx) error {
|
||||
// Check which logger to use based on the log type parameter
|
||||
logTypeParam := c.Query("log_type", string(LogTypeSystem))
|
||||
|
||||
// Parse query parameters
|
||||
category := c.Query("category", "")
|
||||
logItemType := parseLogType(c.Query("type", ""))
|
||||
maxItems := c.QueryInt("max_items", 100)
|
||||
|
||||
// Parse time range
|
||||
fromTime := parseTimeParam(c.Query("from", ""))
|
||||
toTime := parseTimeParam(c.Query("to", ""))
|
||||
|
||||
// Create search arguments
|
||||
searchArgs := logger.SearchArgs{
|
||||
Category: category,
|
||||
LogType: logItemType,
|
||||
MaxItems: maxItems,
|
||||
}
|
||||
|
||||
if !fromTime.IsZero() {
|
||||
searchArgs.TimestampFrom = &fromTime
|
||||
}
|
||||
|
||||
if !toTime.IsZero() {
|
||||
searchArgs.TimestampTo = &toTime
|
||||
}
|
||||
|
||||
// Variables for logs and error
|
||||
var logs []logger.LogItem
|
||||
var err error
|
||||
|
||||
// Check if we want to merge logs from all sources
|
||||
if LogType(logTypeParam) == LogTypeAll {
|
||||
// Get merged logs from all loggers
|
||||
logs, err = h.getMergedLogs(searchArgs)
|
||||
} else {
|
||||
// Select the appropriate logger based on the log type
|
||||
var selectedLogger *logger.Logger
|
||||
|
||||
switch LogType(logTypeParam) {
|
||||
case LogTypeService:
|
||||
selectedLogger = h.serviceLogger
|
||||
case LogTypeJob:
|
||||
selectedLogger = h.jobLogger
|
||||
case LogTypeProcess:
|
||||
selectedLogger = h.processLogger
|
||||
default:
|
||||
selectedLogger = h.systemLogger
|
||||
}
|
||||
|
||||
// Check if the selected logger is properly initialized
|
||||
if selectedLogger == nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": "Logger not initialized",
|
||||
})
|
||||
}
|
||||
|
||||
// Search for logs using the selected logger
|
||||
logs, err = selectedLogger.Search(searchArgs)
|
||||
}
|
||||
|
||||
// Handle search error
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
// Convert logs to a format suitable for the UI
|
||||
response := make([]fiber.Map, 0, len(logs))
|
||||
for _, log := range logs {
|
||||
logTypeStr := "INFO"
|
||||
if log.LogType == logger.LogTypeError {
|
||||
logTypeStr = "ERROR"
|
||||
}
|
||||
|
||||
response = append(response, fiber.Map{
|
||||
"timestamp": log.Timestamp.Format(time.RFC3339),
|
||||
"category": log.Category,
|
||||
"message": log.Message,
|
||||
"type": logTypeStr,
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"logs": response,
|
||||
"total": len(logs),
|
||||
})
|
||||
}
|
||||
|
||||
// GetLogsFragment returns logs in HTML format for Unpoly partial updates
|
||||
func (h *LogHandler) GetLogsFragment(c *fiber.Ctx) error {
|
||||
// This is a fragment template for Unpoly updates
|
||||
|
||||
// Check which logger to use based on the log type parameter
|
||||
logTypeParam := c.Query("log_type", string(LogTypeSystem))
|
||||
|
||||
// Parse query parameters
|
||||
category := c.Query("category", "")
|
||||
logItemType := parseLogType(c.Query("type", ""))
|
||||
maxItems := c.QueryInt("max_items", 100)
|
||||
page := c.QueryInt("page", 1)
|
||||
itemsPerPage := 20 // Default items per page
|
||||
|
||||
// Parse time range
|
||||
fromTime := parseTimeParam(c.Query("from", ""))
|
||||
toTime := parseTimeParam(c.Query("to", ""))
|
||||
|
||||
// Create search arguments
|
||||
searchArgs := logger.SearchArgs{
|
||||
Category: category,
|
||||
LogType: logItemType,
|
||||
MaxItems: maxItems,
|
||||
}
|
||||
|
||||
if !fromTime.IsZero() {
|
||||
searchArgs.TimestampFrom = &fromTime
|
||||
}
|
||||
|
||||
if !toTime.IsZero() {
|
||||
searchArgs.TimestampTo = &toTime
|
||||
}
|
||||
|
||||
// Variables for logs and error
|
||||
var logs []logger.LogItem
|
||||
var err error
|
||||
var logTypeTitle string
|
||||
|
||||
// Check if we want to merge logs from all sources
|
||||
if LogType(logTypeParam) == LogTypeAll {
|
||||
// Get merged logs from all loggers
|
||||
logs, err = h.getMergedLogs(searchArgs)
|
||||
logTypeTitle = "All Logs"
|
||||
} else {
|
||||
// Select the appropriate logger based on the log type
|
||||
var selectedLogger *logger.Logger
|
||||
|
||||
switch LogType(logTypeParam) {
|
||||
case LogTypeService:
|
||||
selectedLogger = h.serviceLogger
|
||||
logTypeTitle = "Service Logs"
|
||||
case LogTypeJob:
|
||||
selectedLogger = h.jobLogger
|
||||
logTypeTitle = "Job Logs"
|
||||
case LogTypeProcess:
|
||||
selectedLogger = h.processLogger
|
||||
logTypeTitle = "Process Logs"
|
||||
default:
|
||||
selectedLogger = h.systemLogger
|
||||
logTypeTitle = "System Logs"
|
||||
}
|
||||
|
||||
// Check if the selected logger is properly initialized
|
||||
if selectedLogger == nil {
|
||||
return c.Render("admin/system/logs_fragment", fiber.Map{
|
||||
"title": logTypeTitle,
|
||||
"error": "Logger not initialized",
|
||||
"logTypes": []LogType{LogTypeAll, LogTypeSystem, LogTypeService, LogTypeJob, LogTypeProcess},
|
||||
"selectedLogType": logTypeParam,
|
||||
})
|
||||
}
|
||||
|
||||
// Search for logs using the selected logger
|
||||
logs, err = selectedLogger.Search(searchArgs)
|
||||
}
|
||||
|
||||
// Handle search error
|
||||
if err != nil {
|
||||
return c.Render("admin/system/logs_fragment", fiber.Map{
|
||||
"title": logTypeTitle,
|
||||
"error": err.Error(),
|
||||
"logTypes": []LogType{LogTypeAll, LogTypeSystem, LogTypeService, LogTypeJob, LogTypeProcess},
|
||||
"selectedLogType": logTypeParam,
|
||||
})
|
||||
}
|
||||
|
||||
// Calculate total pages
|
||||
totalLogs := len(logs)
|
||||
totalPages := (totalLogs + itemsPerPage - 1) / itemsPerPage
|
||||
|
||||
// Apply pagination
|
||||
startIndex := (page - 1) * itemsPerPage
|
||||
endIndex := startIndex + itemsPerPage
|
||||
if endIndex > totalLogs {
|
||||
endIndex = totalLogs
|
||||
}
|
||||
|
||||
// Slice logs for current page
|
||||
pagedLogs := logs
|
||||
if startIndex < totalLogs {
|
||||
pagedLogs = logs[startIndex:endIndex]
|
||||
} else {
|
||||
pagedLogs = []logger.LogItem{}
|
||||
}
|
||||
|
||||
// Convert logs to a format suitable for the UI
|
||||
formattedLogs := make([]fiber.Map, 0, len(pagedLogs))
|
||||
for _, log := range pagedLogs {
|
||||
logTypeStr := "INFO"
|
||||
logTypeClass := "log-info"
|
||||
if log.LogType == logger.LogTypeError {
|
||||
logTypeStr = "ERROR"
|
||||
logTypeClass = "log-error"
|
||||
}
|
||||
|
||||
formattedLogs = append(formattedLogs, fiber.Map{
|
||||
"timestamp": log.Timestamp.Format("2006-01-02T15:04:05"),
|
||||
"category": log.Category,
|
||||
"message": log.Message,
|
||||
"type": logTypeStr,
|
||||
"typeClass": logTypeClass,
|
||||
})
|
||||
}
|
||||
|
||||
// Set layout to empty to disable the layout for fragment responses
|
||||
return c.Render("admin/system/logs_fragment", fiber.Map{
|
||||
"title": logTypeTitle,
|
||||
"logTypes": []LogType{LogTypeAll, LogTypeSystem, LogTypeService, LogTypeJob, LogTypeProcess},
|
||||
"selectedLogType": logTypeParam,
|
||||
"logs": formattedLogs,
|
||||
"total": totalLogs,
|
||||
"showing": len(formattedLogs),
|
||||
"page": page,
|
||||
"totalPages": totalPages,
|
||||
"layout": "", // Disable layout for partial template
|
||||
})
|
||||
}
|
||||
|
||||
// Helper functions
|
||||
|
||||
// parseLogType converts a string log type to the appropriate LogType enum
|
||||
func parseLogType(logTypeStr string) logger.LogType {
|
||||
switch logTypeStr {
|
||||
case "error":
|
||||
return logger.LogTypeError
|
||||
default:
|
||||
return logger.LogTypeStdout
|
||||
}
|
||||
}
|
||||
|
||||
// parseTimeParam parses a time string in ISO format
|
||||
func parseTimeParam(timeStr string) time.Time {
|
||||
if timeStr == "" {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
t, err := time.Parse(time.RFC3339, timeStr)
|
||||
if err != nil {
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
// getMergedLogs retrieves and merges logs from all available loggers
|
||||
func (h *LogHandler) getMergedLogs(args logger.SearchArgs) ([]logger.LogItem, error) {
|
||||
// Create a slice to hold all logs
|
||||
allLogs := make([]logger.LogItem, 0)
|
||||
|
||||
// Create a map to track errors
|
||||
errors := make(map[string]error)
|
||||
|
||||
// Get logs from system logger if available
|
||||
if h.systemLogger != nil {
|
||||
systemLogs, err := h.systemLogger.Search(args)
|
||||
if err != nil {
|
||||
errors["system"] = err
|
||||
} else {
|
||||
// Add source information to each log item
|
||||
for i := range systemLogs {
|
||||
systemLogs[i].Category = fmt.Sprintf("system:%s", systemLogs[i].Category)
|
||||
}
|
||||
allLogs = append(allLogs, systemLogs...)
|
||||
}
|
||||
}
|
||||
|
||||
// Get logs from service logger if available
|
||||
if h.serviceLogger != nil {
|
||||
serviceLogs, err := h.serviceLogger.Search(args)
|
||||
if err != nil {
|
||||
errors["service"] = err
|
||||
} else {
|
||||
// Add source information to each log item
|
||||
for i := range serviceLogs {
|
||||
serviceLogs[i].Category = fmt.Sprintf("service:%s", serviceLogs[i].Category)
|
||||
}
|
||||
allLogs = append(allLogs, serviceLogs...)
|
||||
}
|
||||
}
|
||||
|
||||
// Get logs from job logger if available
|
||||
if h.jobLogger != nil {
|
||||
jobLogs, err := h.jobLogger.Search(args)
|
||||
if err != nil {
|
||||
errors["job"] = err
|
||||
} else {
|
||||
// Add source information to each log item
|
||||
for i := range jobLogs {
|
||||
jobLogs[i].Category = fmt.Sprintf("job:%s", jobLogs[i].Category)
|
||||
}
|
||||
allLogs = append(allLogs, jobLogs...)
|
||||
}
|
||||
}
|
||||
|
||||
// Get logs from process logger if available
|
||||
if h.processLogger != nil {
|
||||
processLogs, err := h.processLogger.Search(args)
|
||||
if err != nil {
|
||||
errors["process"] = err
|
||||
} else {
|
||||
// Add source information to each log item
|
||||
for i := range processLogs {
|
||||
processLogs[i].Category = fmt.Sprintf("process:%s", processLogs[i].Category)
|
||||
}
|
||||
allLogs = append(allLogs, processLogs...)
|
||||
}
|
||||
}
|
||||
|
||||
// Check if we have any logs
|
||||
if len(allLogs) == 0 && len(errors) > 0 {
|
||||
// Combine error messages
|
||||
errorMsgs := make([]string, 0, len(errors))
|
||||
for source, err := range errors {
|
||||
errorMsgs = append(errorMsgs, fmt.Sprintf("%s: %s", source, err.Error()))
|
||||
}
|
||||
return nil, fmt.Errorf("failed to retrieve logs: %s", strings.Join(errorMsgs, "; "))
|
||||
}
|
||||
|
||||
// Sort logs by timestamp (newest first)
|
||||
sort.Slice(allLogs, func(i, j int) bool {
|
||||
return allLogs[i].Timestamp.After(allLogs[j].Timestamp)
|
||||
})
|
||||
|
||||
// Apply max items limit if specified
|
||||
if args.MaxItems > 0 && len(allLogs) > args.MaxItems {
|
||||
allLogs = allLogs[:args.MaxItems]
|
||||
}
|
||||
|
||||
return allLogs, nil
|
||||
}
|
203
_pkg2_dont_use/heroagent/handlers/process_handlers.go
Normal file
203
_pkg2_dont_use/heroagent/handlers/process_handlers.go
Normal file
@@ -0,0 +1,203 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/system/stats"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
// ProcessHandler handles process-related routes.
type ProcessHandler struct {
	// statsManager supplies cached and fresh process statistics; handlers
	// check it for nil and report an initialization error when unset.
	statsManager *stats.StatsManager
}
|
||||
|
||||
// NewProcessHandler creates a new ProcessHandler backed by the given
// StatsManager. A nil statsManager is tolerated here; the route handlers
// detect it and respond with an initialization error instead of panicking.
func NewProcessHandler(statsManager *stats.StatsManager) *ProcessHandler {
	return &ProcessHandler{
		statsManager: statsManager,
	}
}
|
||||
|
||||
// GetProcessStatsJSON returns process stats in JSON format for API consumption
|
||||
func (h *ProcessHandler) GetProcessStatsJSON(c *fiber.Ctx) error {
|
||||
// Check if StatsManager is properly initialized
|
||||
if h.statsManager == nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": "System error: Stats manager not initialized",
|
||||
})
|
||||
}
|
||||
|
||||
// Get process data from the StatsManager
|
||||
processData, err := h.statsManager.GetProcessStatsFresh(100) // Limit to 100 processes
|
||||
if err != nil {
|
||||
// Try getting cached data as fallback
|
||||
processData, err = h.statsManager.GetProcessStats(100)
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": "Failed to get process data: " + err.Error(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Convert to fiber.Map for JSON response
|
||||
response := fiber.Map{
|
||||
"total": processData.Total,
|
||||
"filtered": processData.Filtered,
|
||||
"timestamp": time.Now().Unix(),
|
||||
}
|
||||
|
||||
// Convert processes to a slice of maps
|
||||
processes := make([]fiber.Map, len(processData.Processes))
|
||||
for i, proc := range processData.Processes {
|
||||
processes[i] = fiber.Map{
|
||||
"pid": proc.PID,
|
||||
"name": proc.Name,
|
||||
"status": proc.Status,
|
||||
"cpu_percent": proc.CPUPercent,
|
||||
"memory_mb": proc.MemoryMB,
|
||||
"create_time_str": proc.CreateTime,
|
||||
"is_current": proc.IsCurrent,
|
||||
}
|
||||
}
|
||||
|
||||
response["processes"] = processes
|
||||
|
||||
// Return JSON response
|
||||
return c.JSON(response)
|
||||
}
|
||||
|
||||
// GetProcesses renders the processes page with initial process data
|
||||
func (h *ProcessHandler) GetProcesses(c *fiber.Ctx) error {
|
||||
// Check if StatsManager is properly initialized
|
||||
if h.statsManager == nil {
|
||||
return c.Render("admin/system/processes", fiber.Map{
|
||||
"processes": []fiber.Map{},
|
||||
"error": "System error: Stats manager not initialized",
|
||||
"warning": "The process manager is not properly initialized.",
|
||||
})
|
||||
}
|
||||
|
||||
// Force cache refresh for process stats
|
||||
h.statsManager.ForceUpdate("process")
|
||||
|
||||
// Get process data from the StatsManager
|
||||
processData, err := h.statsManager.GetProcessStatsFresh(0) // Get all processes with fresh data
|
||||
if err != nil {
|
||||
// Try getting cached data as fallback
|
||||
processData, err = h.statsManager.GetProcessStats(0)
|
||||
if err != nil {
|
||||
// If there's an error, still render the page but with empty data
|
||||
return c.Render("admin/system/processes", fiber.Map{
|
||||
"processes": []fiber.Map{},
|
||||
"error": "Failed to load process data: " + err.Error(),
|
||||
"warning": "System attempted both fresh and cached data retrieval but failed.",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Convert to []fiber.Map for template rendering
|
||||
processStats := make([]fiber.Map, len(processData.Processes))
|
||||
for i, proc := range processData.Processes {
|
||||
processStats[i] = fiber.Map{
|
||||
"pid": proc.PID,
|
||||
"name": proc.Name,
|
||||
"status": proc.Status,
|
||||
"cpu_percent": proc.CPUPercent,
|
||||
"memory_mb": proc.MemoryMB,
|
||||
"create_time_str": proc.CreateTime,
|
||||
"is_current": proc.IsCurrent,
|
||||
"cpu_percent_str": fmt.Sprintf("%.1f%%", proc.CPUPercent),
|
||||
"memory_mb_str": fmt.Sprintf("%.1f MB", proc.MemoryMB),
|
||||
}
|
||||
}
|
||||
|
||||
// Render the full page with initial process data
|
||||
return c.Render("admin/system/processes", fiber.Map{
|
||||
"processes": processStats,
|
||||
})
|
||||
}
|
||||
|
||||
// GetProcessesData returns the HTML fragment for processes data
|
||||
func (h *ProcessHandler) GetProcessesData(c *fiber.Ctx) error {
|
||||
// Check if this is a manual refresh request (with X-Requested-With header set)
|
||||
isManualRefresh := c.Get("X-Requested-With") == "XMLHttpRequest"
|
||||
|
||||
// Check if StatsManager is properly initialized
|
||||
if h.statsManager == nil {
|
||||
return c.Render("admin/system/processes_data", fiber.Map{
|
||||
"error": "System error: Stats manager not initialized",
|
||||
"layout": "",
|
||||
})
|
||||
}
|
||||
|
||||
// For manual refresh, always get fresh data by forcing cache invalidation
|
||||
var processData *stats.ProcessStats
|
||||
var err error
|
||||
|
||||
// Force cache refresh for process stats on manual refresh
|
||||
if isManualRefresh {
|
||||
h.statsManager.ForceUpdate("process")
|
||||
}
|
||||
|
||||
if isManualRefresh {
|
||||
// Force bypass cache for manual refresh by using fresh data
|
||||
processData, err = h.statsManager.GetProcessStatsFresh(0)
|
||||
} else {
|
||||
// Use cached data for auto-polling
|
||||
processData, err = h.statsManager.GetProcessStats(0)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// Try alternative method if the primary method fails
|
||||
if isManualRefresh {
|
||||
processData, err = h.statsManager.GetProcessStats(0)
|
||||
} else {
|
||||
processData, err = h.statsManager.GetProcessStatsFresh(0)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// Handle AJAX requests differently from regular requests
|
||||
isAjax := c.Get("X-Requested-With") == "XMLHttpRequest"
|
||||
if isAjax {
|
||||
return c.Status(fiber.StatusInternalServerError).SendString("Failed to get process data: " + err.Error())
|
||||
}
|
||||
// For regular requests, render the error within the fragment
|
||||
return c.Render("admin/system/processes_data", fiber.Map{
|
||||
"error": "Failed to get process data: " + err.Error(),
|
||||
"layout": "",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Convert to []fiber.Map for template rendering
|
||||
processStats := make([]fiber.Map, len(processData.Processes))
|
||||
for i, proc := range processData.Processes {
|
||||
processStats[i] = fiber.Map{
|
||||
"pid": proc.PID,
|
||||
"name": proc.Name,
|
||||
"status": proc.Status,
|
||||
"cpu_percent": proc.CPUPercent,
|
||||
"memory_mb": proc.MemoryMB,
|
||||
"create_time_str": proc.CreateTime,
|
||||
"is_current": proc.IsCurrent,
|
||||
"cpu_percent_str": fmt.Sprintf("%.1f%%", proc.CPUPercent),
|
||||
"memory_mb_str": fmt.Sprintf("%.1f MB", proc.MemoryMB),
|
||||
}
|
||||
}
|
||||
|
||||
// Create a boolean to indicate if we have processes
|
||||
hasProcesses := len(processStats) > 0
|
||||
|
||||
// Create template data with fiber.Map
|
||||
templateData := fiber.Map{
|
||||
"hasProcesses": hasProcesses,
|
||||
"processCount": len(processStats),
|
||||
"processStats": processStats,
|
||||
"layout": "", // Disable layout for partial template
|
||||
}
|
||||
|
||||
// Return only the table HTML content directly to be injected into the processes-table-content div
|
||||
return c.Render("admin/system/processes_data", templateData)
|
||||
}
|
266
_pkg2_dont_use/heroagent/handlers/service_handlers.go
Normal file
266
_pkg2_dont_use/heroagent/handlers/service_handlers.go
Normal file
@@ -0,0 +1,266 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/processmanager/interfaces"
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/processmanager/interfaces/openrpc"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
// ServiceHandler handles service-related routes.
// All operations are proxied to the process manager over an OpenRPC client.
type ServiceHandler struct {
	client *openrpc.Client // RPC client connected to the process-manager socket
}
|
||||
|
||||
// NewServiceHandler creates a new ServiceHandler
|
||||
func NewServiceHandler(socketPath, secret string) *ServiceHandler {
|
||||
fmt.Printf("DEBUG: Creating new ServiceHandler with socket path: %s and secret: %s\n", socketPath, secret)
|
||||
return &ServiceHandler{
|
||||
client: openrpc.NewClient(socketPath, secret),
|
||||
}
|
||||
}
|
||||
|
||||
// GetServices renders the services page
|
||||
func (h *ServiceHandler) GetServices(c *fiber.Ctx) error {
|
||||
return c.Render("admin/services", fiber.Map{
|
||||
"title": "Services",
|
||||
"error": c.Query("error", ""),
|
||||
"warning": c.Query("warning", ""),
|
||||
})
|
||||
}
|
||||
|
||||
// GetServicesFragment returns the services table fragment for Unpoly updates
|
||||
func (h *ServiceHandler) GetServicesFragment(c *fiber.Ctx) error {
|
||||
processes, err := h.getProcessList()
|
||||
if err != nil {
|
||||
return c.Render("admin/services_fragment", fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to fetch services: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
return c.Render("admin/services_fragment", fiber.Map{
|
||||
"processes": processes,
|
||||
})
|
||||
}
|
||||
|
||||
// StartService handles the request to start a new service
|
||||
func (h *ServiceHandler) StartService(c *fiber.Ctx) error {
|
||||
name := c.FormValue("name")
|
||||
command := c.FormValue("command")
|
||||
|
||||
if name == "" || command == "" {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": "Service name and command are required",
|
||||
})
|
||||
}
|
||||
|
||||
// Default to enabling logs
|
||||
logEnabled := true
|
||||
|
||||
// Start the process with no deadline, no cron, and no job ID
|
||||
fmt.Printf("DEBUG: StartService called for '%s' using client: %p\n", name, h.client)
|
||||
result, err := h.client.StartProcess(name, command, logEnabled, 0, "", "")
|
||||
if err != nil {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to start service: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
if !result.Success {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": result.Message,
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"message": result.Message,
|
||||
"pid": result.PID,
|
||||
})
|
||||
}
|
||||
|
||||
// StopService handles the request to stop a service
|
||||
func (h *ServiceHandler) StopService(c *fiber.Ctx) error {
|
||||
name := c.FormValue("name")
|
||||
|
||||
if name == "" {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": "Service name is required",
|
||||
})
|
||||
}
|
||||
|
||||
result, err := h.client.StopProcess(name)
|
||||
if err != nil {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to stop service: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
if !result.Success {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": result.Message,
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"message": result.Message,
|
||||
})
|
||||
}
|
||||
|
||||
// RestartService handles the request to restart a service
|
||||
func (h *ServiceHandler) RestartService(c *fiber.Ctx) error {
|
||||
name := c.FormValue("name")
|
||||
|
||||
if name == "" {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": "Service name is required",
|
||||
})
|
||||
}
|
||||
|
||||
result, err := h.client.RestartProcess(name)
|
||||
if err != nil {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to restart service: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
if !result.Success {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": result.Message,
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"message": result.Message,
|
||||
"pid": result.PID,
|
||||
})
|
||||
}
|
||||
|
||||
// DeleteService handles the request to delete a service
|
||||
func (h *ServiceHandler) DeleteService(c *fiber.Ctx) error {
|
||||
name := c.FormValue("name")
|
||||
|
||||
if name == "" {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": "Service name is required",
|
||||
})
|
||||
}
|
||||
|
||||
result, err := h.client.DeleteProcess(name)
|
||||
if err != nil {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to delete service: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
if !result.Success {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": result.Message,
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"message": result.Message,
|
||||
})
|
||||
}
|
||||
|
||||
// GetServiceLogs handles the request to get logs for a service
|
||||
func (h *ServiceHandler) GetServiceLogs(c *fiber.Ctx) error {
|
||||
name := c.Query("name")
|
||||
lines := c.QueryInt("lines", 100)
|
||||
|
||||
fmt.Printf("DEBUG: GetServiceLogs called for service '%s' using client: %p\n", name, h.client)
|
||||
|
||||
if name == "" {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": "Service name is required",
|
||||
})
|
||||
}
|
||||
|
||||
// Debug: List all processes before getting logs
|
||||
processes, listErr := h.getProcessList()
|
||||
if listErr == nil {
|
||||
fmt.Println("DEBUG: Current processes in service handler:")
|
||||
for _, proc := range processes {
|
||||
fmt.Printf("DEBUG: - '%v' (PID: %v, Status: %v)\n", proc["Name"], proc["ID"], proc["Status"])
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("DEBUG: Error listing processes: %v\n", listErr)
|
||||
}
|
||||
|
||||
result, err := h.client.GetProcessLogs(name, lines)
|
||||
if err != nil {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": fmt.Sprintf("Failed to get service logs: %v", err),
|
||||
})
|
||||
}
|
||||
|
||||
if !result.Success {
|
||||
return c.JSON(fiber.Map{
|
||||
"error": result.Message,
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(fiber.Map{
|
||||
"success": true,
|
||||
"logs": result.Logs,
|
||||
})
|
||||
}
|
||||
|
||||
// Helper function to get the list of processes and format them for the UI
|
||||
func (h *ServiceHandler) getProcessList() ([]fiber.Map, error) {
|
||||
// Get the list of processes
|
||||
result, err := h.client.ListProcesses("json")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list processes: %v", err)
|
||||
}
|
||||
|
||||
// Convert the result to a slice of ProcessStatus
|
||||
processList, ok := result.([]interfaces.ProcessStatus)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected result type from ListProcesses")
|
||||
}
|
||||
|
||||
// Format the processes for the UI
|
||||
formattedProcesses := make([]fiber.Map, 0, len(processList))
|
||||
for _, proc := range processList {
|
||||
// Calculate uptime
|
||||
uptime := "N/A"
|
||||
if proc.Status == "running" {
|
||||
duration := time.Since(proc.StartTime)
|
||||
if duration.Hours() >= 24 {
|
||||
days := int(duration.Hours() / 24)
|
||||
hours := int(duration.Hours()) % 24
|
||||
uptime = fmt.Sprintf("%dd %dh", days, hours)
|
||||
} else if duration.Hours() >= 1 {
|
||||
hours := int(duration.Hours())
|
||||
minutes := int(duration.Minutes()) % 60
|
||||
uptime = fmt.Sprintf("%dh %dm", hours, minutes)
|
||||
} else {
|
||||
minutes := int(duration.Minutes())
|
||||
seconds := int(duration.Seconds()) % 60
|
||||
uptime = fmt.Sprintf("%dm %ds", minutes, seconds)
|
||||
}
|
||||
}
|
||||
|
||||
// Format CPU and memory usage
|
||||
cpuUsage := fmt.Sprintf("%.1f%%", proc.CPUPercent)
|
||||
memoryUsage := fmt.Sprintf("%.1f MB", proc.MemoryMB)
|
||||
|
||||
formattedProcesses = append(formattedProcesses, fiber.Map{
|
||||
"Name": proc.Name,
|
||||
"Status": string(proc.Status),
|
||||
"ID": proc.PID,
|
||||
"CPU": cpuUsage,
|
||||
"Memory": memoryUsage,
|
||||
"Uptime": uptime,
|
||||
})
|
||||
}
|
||||
|
||||
return formattedProcesses, nil
|
||||
}
|
375
_pkg2_dont_use/heroagent/handlers/system_handlers.go
Normal file
375
_pkg2_dont_use/heroagent/handlers/system_handlers.go
Normal file
@@ -0,0 +1,375 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.ourworld.tf/herocode/heroagent/pkg/system/stats"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/shirou/gopsutil/v3/host"
|
||||
)
|
||||
|
||||
// UptimeProvider defines an interface for getting system uptime.
type UptimeProvider interface {
	// GetUptime returns the system uptime as a display-ready string.
	GetUptime() string
}
|
||||
|
||||
// SystemHandler handles system-related page routes.
type SystemHandler struct {
	uptimeProvider UptimeProvider      // source of the uptime string; may be nil
	statsManager   *stats.StatsManager // cached system stats; may be nil if default init failed
}
|
||||
|
||||
// NewSystemHandler creates a new SystemHandler
|
||||
func NewSystemHandler(uptimeProvider UptimeProvider, statsManager *stats.StatsManager) *SystemHandler {
|
||||
// If statsManager is nil, create a new one with default settings
|
||||
if statsManager == nil {
|
||||
var err error
|
||||
statsManager, err = stats.NewStatsManagerWithDefaults()
|
||||
if err != nil {
|
||||
// Log the error but continue with nil statsManager
|
||||
fmt.Printf("Error creating StatsManager: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
return &SystemHandler{
|
||||
uptimeProvider: uptimeProvider,
|
||||
statsManager: statsManager,
|
||||
}
|
||||
}
|
||||
|
||||
// GetSystemInfo renders the system info page
|
||||
func (h *SystemHandler) GetSystemInfo(c *fiber.Ctx) error {
|
||||
// Initialize default values
|
||||
cpuInfo := "Unknown"
|
||||
memoryInfo := "Unknown"
|
||||
diskInfo := "Unknown"
|
||||
networkInfo := "Unknown"
|
||||
osInfo := "Unknown"
|
||||
uptimeInfo := "Unknown"
|
||||
|
||||
// Get hardware stats from the StatsManager
|
||||
var hardwareStats map[string]interface{}
|
||||
if h.statsManager != nil {
|
||||
hardwareStats = h.statsManager.GetHardwareStats()
|
||||
} else {
|
||||
// Fallback to direct function call if StatsManager is not available
|
||||
hardwareStats = stats.GetHardwareStats()
|
||||
}
|
||||
|
||||
// Extract the formatted strings - safely handle different return types
|
||||
if cpuVal, ok := hardwareStats["cpu"]; ok {
|
||||
switch v := cpuVal.(type) {
|
||||
case string:
|
||||
cpuInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
if model, ok := v["model"].(string); ok {
|
||||
usage := 0.0
|
||||
if usagePercent, ok := v["usage_percent"].(float64); ok {
|
||||
usage = usagePercent
|
||||
}
|
||||
cpuInfo = fmt.Sprintf("%s (Usage: %.1f%%)", model, usage)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if memVal, ok := hardwareStats["memory"]; ok {
|
||||
switch v := memVal.(type) {
|
||||
case string:
|
||||
memoryInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
total, used := 0.0, 0.0
|
||||
if totalGB, ok := v["total_gb"].(float64); ok {
|
||||
total = totalGB
|
||||
}
|
||||
if usedGB, ok := v["used_gb"].(float64); ok {
|
||||
used = usedGB
|
||||
}
|
||||
usedPercent := 0.0
|
||||
if percent, ok := v["used_percent"].(float64); ok {
|
||||
usedPercent = percent
|
||||
}
|
||||
memoryInfo = fmt.Sprintf("%.1f GB / %.1f GB (%.1f%% used)", used, total, usedPercent)
|
||||
}
|
||||
}
|
||||
|
||||
if diskVal, ok := hardwareStats["disk"]; ok {
|
||||
switch v := diskVal.(type) {
|
||||
case string:
|
||||
diskInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
total, used := 0.0, 0.0
|
||||
if totalGB, ok := v["total_gb"].(float64); ok {
|
||||
total = totalGB
|
||||
}
|
||||
if usedGB, ok := v["used_gb"].(float64); ok {
|
||||
used = usedGB
|
||||
}
|
||||
usedPercent := 0.0
|
||||
if percent, ok := v["used_percent"].(float64); ok {
|
||||
usedPercent = percent
|
||||
}
|
||||
diskInfo = fmt.Sprintf("%.1f GB / %.1f GB (%.1f%% used)", used, total, usedPercent)
|
||||
}
|
||||
}
|
||||
|
||||
if netVal, ok := hardwareStats["network"]; ok {
|
||||
switch v := netVal.(type) {
|
||||
case string:
|
||||
networkInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
var interfaces []string
|
||||
if ifaces, ok := v["interfaces"].([]interface{}); ok {
|
||||
for _, iface := range ifaces {
|
||||
if ifaceMap, ok := iface.(map[string]interface{}); ok {
|
||||
name := ifaceMap["name"].(string)
|
||||
ip := ifaceMap["ip"].(string)
|
||||
interfaces = append(interfaces, fmt.Sprintf("%s: %s", name, ip))
|
||||
}
|
||||
}
|
||||
networkInfo = strings.Join(interfaces, ", ")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get OS info
|
||||
hostInfo, err := host.Info()
|
||||
if err == nil {
|
||||
osInfo = fmt.Sprintf("%s %s (%s)", hostInfo.Platform, hostInfo.PlatformVersion, hostInfo.KernelVersion)
|
||||
}
|
||||
|
||||
// Get uptime
|
||||
if h.uptimeProvider != nil {
|
||||
uptimeInfo = h.uptimeProvider.GetUptime()
|
||||
}
|
||||
|
||||
// Render the template with the system info
|
||||
return c.Render("admin/system/info", fiber.Map{
|
||||
"title": "System Information",
|
||||
"cpuInfo": cpuInfo,
|
||||
"memoryInfo": memoryInfo,
|
||||
"diskInfo": diskInfo,
|
||||
"networkInfo": networkInfo,
|
||||
"osInfo": osInfo,
|
||||
"uptimeInfo": uptimeInfo,
|
||||
})
|
||||
}
|
||||
|
||||
// GetHardwareStats returns only the hardware stats for Unpoly polling
|
||||
func (h *SystemHandler) GetHardwareStats(c *fiber.Ctx) error {
|
||||
// Initialize default values
|
||||
cpuInfo := "Unknown"
|
||||
memoryInfo := "Unknown"
|
||||
diskInfo := "Unknown"
|
||||
networkInfo := "Unknown"
|
||||
|
||||
// Get hardware stats from the StatsManager
|
||||
var hardwareStats map[string]interface{}
|
||||
if h.statsManager != nil {
|
||||
hardwareStats = h.statsManager.GetHardwareStats()
|
||||
} else {
|
||||
// Fallback to direct function call if StatsManager is not available
|
||||
hardwareStats = stats.GetHardwareStats()
|
||||
}
|
||||
|
||||
// Extract the formatted strings - safely handle different return types
|
||||
if cpuVal, ok := hardwareStats["cpu"]; ok {
|
||||
switch v := cpuVal.(type) {
|
||||
case string:
|
||||
cpuInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
if model, ok := v["model"].(string); ok {
|
||||
cpuInfo = model
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if memVal, ok := hardwareStats["memory"]; ok {
|
||||
switch v := memVal.(type) {
|
||||
case string:
|
||||
memoryInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
total, used := 0.0, 0.0
|
||||
if totalGB, ok := v["total_gb"].(float64); ok {
|
||||
total = totalGB
|
||||
}
|
||||
if usedGB, ok := v["used_gb"].(float64); ok {
|
||||
used = usedGB
|
||||
}
|
||||
memoryInfo = fmt.Sprintf("%.1f GB / %.1f GB", used, total)
|
||||
}
|
||||
}
|
||||
|
||||
if diskVal, ok := hardwareStats["disk"]; ok {
|
||||
switch v := diskVal.(type) {
|
||||
case string:
|
||||
diskInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
total, used := 0.0, 0.0
|
||||
if totalGB, ok := v["total_gb"].(float64); ok {
|
||||
total = totalGB
|
||||
}
|
||||
if usedGB, ok := v["used_gb"].(float64); ok {
|
||||
used = usedGB
|
||||
}
|
||||
diskInfo = fmt.Sprintf("%.1f GB / %.1f GB", used, total)
|
||||
}
|
||||
}
|
||||
|
||||
if netVal, ok := hardwareStats["network"]; ok {
|
||||
switch v := netVal.(type) {
|
||||
case string:
|
||||
networkInfo = v
|
||||
case map[string]interface{}:
|
||||
// Format the map into a string
|
||||
var interfaces []string
|
||||
if ifaces, ok := v["interfaces"].([]interface{}); ok {
|
||||
for _, iface := range ifaces {
|
||||
if ifaceMap, ok := iface.(map[string]interface{}); ok {
|
||||
name := ifaceMap["name"].(string)
|
||||
ip := ifaceMap["ip"].(string)
|
||||
interfaces = append(interfaces, fmt.Sprintf("%s: %s", name, ip))
|
||||
}
|
||||
}
|
||||
networkInfo = strings.Join(interfaces, ", ")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Format for display
|
||||
cpuUsage := "0.0%"
|
||||
memUsage := "0.0%"
|
||||
diskUsage := "0.0%"
|
||||
|
||||
// Safely extract usage percentages
|
||||
if cpuVal, ok := hardwareStats["cpu"].(map[string]interface{}); ok {
|
||||
if usagePercent, ok := cpuVal["usage_percent"].(float64); ok {
|
||||
cpuUsage = fmt.Sprintf("%.1f%%", usagePercent)
|
||||
}
|
||||
}
|
||||
|
||||
if memVal, ok := hardwareStats["memory"].(map[string]interface{}); ok {
|
||||
if usedPercent, ok := memVal["used_percent"].(float64); ok {
|
||||
memUsage = fmt.Sprintf("%.1f%%", usedPercent)
|
||||
}
|
||||
}
|
||||
|
||||
if diskVal, ok := hardwareStats["disk"].(map[string]interface{}); ok {
|
||||
if usedPercent, ok := diskVal["used_percent"].(float64); ok {
|
||||
diskUsage = fmt.Sprintf("%.1f%%", usedPercent)
|
||||
}
|
||||
}
|
||||
|
||||
// Render only the hardware stats fragment
|
||||
return c.Render("admin/system/hardware_stats_fragment", fiber.Map{
|
||||
"cpuInfo": cpuInfo,
|
||||
"memoryInfo": memoryInfo,
|
||||
"diskInfo": diskInfo,
|
||||
"networkInfo": networkInfo,
|
||||
"cpuUsage": cpuUsage,
|
||||
"memUsage": memUsage,
|
||||
"diskUsage": diskUsage,
|
||||
})
|
||||
}
|
||||
|
||||
// GetHardwareStatsAPI returns hardware stats in JSON format
|
||||
func (h *SystemHandler) GetHardwareStatsAPI(c *fiber.Ctx) error {
|
||||
// Get hardware stats from the StatsManager
|
||||
var hardwareStats map[string]interface{}
|
||||
if h.statsManager != nil {
|
||||
hardwareStats = h.statsManager.GetHardwareStats()
|
||||
} else {
|
||||
// Fallback to direct function call if StatsManager is not available
|
||||
hardwareStats = stats.GetHardwareStats()
|
||||
}
|
||||
|
||||
return c.JSON(hardwareStats)
|
||||
}
|
||||
|
||||
// GetProcessStatsAPI returns process stats in JSON format for API consumption
|
||||
func (h *SystemHandler) GetProcessStatsAPI(c *fiber.Ctx) error {
|
||||
// Check if StatsManager is properly initialized
|
||||
if h.statsManager == nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": "System error: Stats manager not initialized",
|
||||
})
|
||||
}
|
||||
|
||||
// Get process data from the StatsManager
|
||||
processData, err := h.statsManager.GetProcessStatsFresh(100) // Limit to 100 processes
|
||||
if err != nil {
|
||||
// Try getting cached data as fallback
|
||||
processData, err = h.statsManager.GetProcessStats(100)
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": "Failed to get process data: " + err.Error(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Convert to fiber.Map for JSON response
|
||||
response := fiber.Map{
|
||||
"total": processData.Total,
|
||||
"filtered": processData.Filtered,
|
||||
"timestamp": time.Now().Unix(),
|
||||
}
|
||||
|
||||
// Convert processes to a slice of maps
|
||||
processes := make([]fiber.Map, len(processData.Processes))
|
||||
for i, proc := range processData.Processes {
|
||||
processes[i] = fiber.Map{
|
||||
"pid": proc.PID,
|
||||
"name": proc.Name,
|
||||
"status": proc.Status,
|
||||
"cpu_percent": proc.CPUPercent,
|
||||
"memory_mb": proc.MemoryMB,
|
||||
"create_time_str": proc.CreateTime,
|
||||
"is_current": proc.IsCurrent,
|
||||
}
|
||||
}
|
||||
|
||||
response["processes"] = processes
|
||||
|
||||
// Return JSON response
|
||||
return c.JSON(response)
|
||||
}
|
||||
|
||||
// GetSystemLogs renders the system logs page
|
||||
func (h *SystemHandler) GetSystemLogs(c *fiber.Ctx) error {
|
||||
return c.Render("admin/system/logs", fiber.Map{
|
||||
"title": "System Logs",
|
||||
})
|
||||
}
|
||||
|
||||
// GetSystemLogsTest renders the test logs page
|
||||
func (h *SystemHandler) GetSystemLogsTest(c *fiber.Ctx) error {
|
||||
return c.Render("admin/system/logs_test", fiber.Map{
|
||||
"title": "Test Logs",
|
||||
})
|
||||
}
|
||||
|
||||
// GetSystemSettings renders the system settings page
|
||||
func (h *SystemHandler) GetSystemSettings(c *fiber.Ctx) error {
|
||||
// Get the current time
|
||||
currentTime := time.Now().Format("2006-01-02 15:04:05")
|
||||
|
||||
// Render the template with the system settings
|
||||
return c.Render("admin/system/settings", fiber.Map{
|
||||
"title": "System Settings",
|
||||
"currentTime": currentTime,
|
||||
"settings": map[string]interface{}{
|
||||
"autoUpdate": true,
|
||||
"logLevel": "info",
|
||||
"maxLogSize": "100MB",
|
||||
"backupFrequency": "Daily",
|
||||
},
|
||||
})
|
||||
}
|
Reference in New Issue
Block a user