add file browser component and widget

Timur Gordon
2025-08-05 15:02:23 +02:00
parent 4e43c21b72
commit ba43a82db0
95 changed files with 17840 additions and 423 deletions

File diff suppressed because it is too large.

View File

@@ -0,0 +1,25 @@
[package]
name = "file-browser-mock-server"
version = "0.1.0"
edition = "2021"
[workspace]
[[bin]]
name = "mock-server"
path = "src/main.rs"
[dependencies]
axum = "0.7"
tokio = { version = "1.0", features = ["full"] }
tower-http = { version = "0.5", features = ["cors"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
uuid = { version = "1.0", features = ["v4"] }
chrono = { version = "0.4", features = ["serde"] }
tracing = "0.1"
tracing-subscriber = "0.3"
anyhow = "1.0"
clap = { version = "4.0", features = ["derive"] }
walkdir = "2.3"
base64 = "0.21"

View File

@@ -0,0 +1,3 @@
# File Browser Demo
This is a sample file for testing the file browser component.

View File

@@ -0,0 +1 @@
Sample notes file content.

View File

@@ -0,0 +1,3 @@
# Sample Report
This is a sample markdown report.

View File

@@ -0,0 +1 @@
Placeholder for image files.

View File

@@ -0,0 +1 @@
{"name": "sample-project", "version": "1.0.0"}

View File

@@ -0,0 +1,3 @@
# Project 1
Sample project documentation.

View File

@@ -0,0 +1 @@
This is a sample text file.

View File

@@ -0,0 +1,59 @@
# Design
## Overview
This document outlines a system design that satisfies the specified requirements for decentralized backend ownership. It describes how to implement core capabilities like isolation, delegation, and open logic control — without introducing tight coupling or central dependencies.
## Design Principles
### 1. **Contextual Execution**
- Define a runtime model where each peer context is a named environment.
- Execution is scoped to a context, and all operations are resolved within it.
**Implementation Strategy:**
- Use a unified worker engine that can load and execute within a namespaced peer context.
- Contexts are mounted via a virtual filesystem abstraction, one directory per peer.
### 2. **Logical Isolation via Filesystem Namespacing**
- Each peer's execution environment is backed by a namespaced root directory.
- All storage operations are relative to that root.
**Advantages:**
- Easy enforcement of data boundaries
- Works across shared processes
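A minimal sketch of this resolution step (the `resolve` helper is illustrative, not part of this design):

```rust
use std::path::{Path, PathBuf};

/// Illustrative helper: map a peer-relative path into that peer's
/// namespaced root, rejecting anything that would escape it.
fn resolve(peer_root: &Path, relative: &str) -> Option<PathBuf> {
    // A real implementation should canonicalize and re-check the
    // result; this sketch only rejects obvious traversal components.
    if relative.split('/').any(|part| part == "..") {
        return None;
    }
    Some(peer_root.join(relative))
}
```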
### 3. **Script-Based Delegated Execution**
- Scripts are the unit of cross-peer interaction.
- A script includes the `caller` (originating peer), parameters, and logic.
**Design Feature:**
- A script sent to another peer is evaluated with both `caller` and `target` contexts available to the runtime.
- Target peer decides whether to accept and how to interpret it.
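A minimal sketch of such a script envelope (field names are illustrative):

```rust
use serde::{Deserialize, Serialize};

/// Illustrative envelope for a delegated script: the unit of
/// cross-peer interaction described above.
#[derive(Debug, Serialize, Deserialize)]
struct ScriptEnvelope {
    caller: String,            // originating peer ID
    target: String,            // receiving peer ID
    params: serde_json::Value, // caller-supplied parameters
    source: String,            // script body, e.g. Rhai source text
}
```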
### 4. **Policy-Driven Acceptance**
- Each context has policies determining:
- Which peers may send scripts
- Which actions are allowed
**Example:** Policies can be written as declarative access control rules, tied to peer IDs, namespaces, or capabilities; a sketch follows below.
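A minimal sketch, assuming simple allow-lists keyed by peer ID and action name (no concrete policy engine is specified here):

```rust
/// Illustrative declarative policy: which peers may send scripts,
/// and which actions they may invoke.
struct Policy {
    allowed_callers: Vec<String>, // peer IDs permitted to delegate
    allowed_actions: Vec<String>, // action names they may invoke
}

impl Policy {
    fn permits(&self, caller: &str, action: &str) -> bool {
        self.allowed_callers.iter().any(|c| c == caller)
            && self.allowed_actions.iter().any(|a| a == action)
    }
}
```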
### 5. **Open, Modifiable Logic**
- Use an embedded domain-specific language (e.g. Rhai) that allows:
- Peer owners to define and inspect their logic
- Script modules to be composed, extended, or overridden
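A minimal sketch of embedding Rhai (this assumes the `rhai` crate as a dependency, which this design does not mandate):

```rust
use rhai::Engine;

fn main() -> Result<(), Box<rhai::EvalAltResult>> {
    let engine = Engine::new();
    // Peer-owned logic is plain source text: inspectable by its
    // owner, and replaceable or extendable without recompiling.
    let result: i64 = engine.eval("let x = 40; x + 2")?;
    println!("script returned {result}");
    Ok(())
}
```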
### 6. **Worker Multiplexing**
- Use a single worker binary that can handle one or many peer contexts.
- The context is dynamically determined at runtime.
**Design Note:**
- All workers enforce namespacing, even when only one peer is active per process.
- Supports both isolated (1 peer per worker) and shared (many peers per worker) deployments.
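As a sketch of the shared deployment (types are illustrative), a single worker can hold one namespaced root per active peer:

```rust
use std::collections::HashMap;
use std::path::PathBuf;

/// Illustrative multiplexed worker state. One entry covers the
/// isolated case (one peer per worker); many entries cover the
/// shared case, with namespacing enforced either way.
struct Worker {
    contexts: HashMap<String, PathBuf>, // peer ID -> mounted root
}

impl Worker {
    fn context_root(&self, peer_id: &str) -> Option<&PathBuf> {
        self.contexts.get(peer_id)
    }
}
```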
## Optional Enhancements
- Pluggable transport layer (WebSocket, HTTP/2, NATS, etc.)
- Pluggable storage backends for namespace-mounting (FS, S3, SQLite, etc.)
- Declarative schema binding between DSL and structured data
This design enables decentralized application runtime control while supporting a scalable and secure execution model.

View File

@@ -0,0 +1,50 @@
# System Requirements Specification
## Objective
To define the core requirements for a system that fulfills the goals of decentralized backend ownership — enabling individuals and organizations to control, operate, and interact through their own backend environments without relying on centralized infrastructure.
## Functional Requirements
### 1. **Isolated Execution Contexts**
- Each user or peer must operate within a distinct, logically isolated execution context.
- Contexts must not be able to interfere with each other's state or runtime.
### 2. **Cross-Context Communication**
- Peers must be able to initiate interactions with other peers.
- Communication must include origin metadata (who initiated it) and must be authorized by the target context.
### 3. **Delegated Execution**
- A peer must be able to send code or instructions to another peer for execution, under the recipient's policies.
- The recipient must treat the execution as contextualized by the caller, but constrained by its own local rules.
### 4. **Ownership of Logic and Data**
- Users must be able to inspect, modify, and extend the logic that governs their backend.
- Data storage and access policies must be under the control of the peer.
### 5. **Composability and Modifiability**
- System behavior must be defined by open, composable modules or scripts.
- Users must be able to override default behavior or extend it with minimal coupling.
## Non-Functional Requirements
### 6. **Security and Isolation**
- Scripts or instructions from external peers must be sandboxed and policy-checked.
- Each execution context must enforce boundaries between data and logic.
### 7. **Resilience and Redundancy**
- Failure of one peer or node must not impact others.
- Communication must be asynchronous and fault-tolerant.
### 8. **Portability**
- A peer's logic and data must be portable across environments and host infrastructure.
- No assumption of persistent centralized hosting.
### 9. **Transparency**
- All logic must be auditable by its owner.
- Communications between peers must be observable and traceable.
### 10. **Scalability**
- The system must support large numbers of peer contexts, potentially hosted on shared infrastructure without compromising logical separation.
These requirements define the baseline for any system that claims to decentralize backend control and empower users to operate their own programmable, connected environments.

View File

@@ -0,0 +1,34 @@
# Rethinking Backend Ownership
## Motivation
Modern applications are powered by backends that run on infrastructure and systems controlled by centralized entities. Whether it's social platforms, collaboration tools, or data-driven apps, the backend is almost always a black box — hosted, maintained, and operated by someone else.
This has profound implications:
- **Loss of autonomy:** Users are locked out of the logic, rules, and data structures that govern their digital experience.
- **Opaque control:** Application behavior can change without the user's consent — and often without visibility.
- **Vendor lock-in:** Switching providers or migrating data is often non-trivial, risky, or impossible.
- **Security and privacy risks:** Centralized backends present single points of failure and attack.
In this model, users are not participants in their computing environment — they are clients of someone else's backend.
## The Vision
The purpose of this initiative is to invert that dynamic. We aim to establish a paradigm where users and organizations **own and control their own backend logic and data**, without sacrificing connectivity, collaboration, or scalability.
This means:
- **Local authority:** Each user or organization should have full control over how their backend behaves — what code runs, what data is stored, and who can access it.
- **Portable and interoperable:** Ownership must not mean isolation. User-owned backends should be able to interact with one another on equal footing.
- **Transparent logic:** Application behavior should be visible, inspectable, and modifiable by the user.
- **Delegation, not dependence:** Users should be able to cooperate and interact by delegating execution to each other — not by relying on a central server.
## What We Stand For
- **Agency:** You control your digital environment.
- **Decentralization:** No central chokepoint for computation or data.
- **Modularity:** Users compose their backend behavior, not inherit it from a monolith.
- **Resilience:** Systems should degrade gracefully, fail independently, and recover without central orchestration.
This is about building a more equitable and open computing model — one where the backend serves you, not the other way around.

Binary file not shown (4.5 MiB).

Binary file not shown (6.6 KiB).

Binary file not shown (5.7 KiB).

Binary file not shown (6.6 KiB).

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,565 @@
use axum::{
extract::{DefaultBodyLimit, Path, Query},
http::{HeaderMap, StatusCode},
response::{IntoResponse, Json, Response},
routing::{delete, get, post},
Router,
};
use walkdir::WalkDir;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
fs,
path::{Path as StdPath, PathBuf},
sync::{Arc, Mutex},
};
use tower_http::cors::CorsLayer;
use tracing::{info, warn};
/// File/Directory item information
#[derive(Debug, Serialize, Deserialize)]
struct FileItem {
name: String,
path: String,
is_directory: bool,
size: Option<u64>,
modified: Option<String>,
hash: Option<String>,
}
/// API response for directory listing
#[derive(Debug, Serialize)]
struct ListResponse {
contents: Vec<FileItem>,
}
/// API response for errors
#[derive(Debug, Serialize)]
struct ErrorResponse {
error: String,
}
/// API response for success messages
#[derive(Debug, Serialize)]
struct SuccessResponse {
message: String,
}
/// Query parameters for listing
#[derive(Debug, Deserialize)]
struct ListQuery {
recursive: Option<bool>,
}
/// Mock server state
#[derive(Clone)]
struct AppState {
base_dir: PathBuf,
// Simple upload tracking: upload_id -> (filename, file_path)
uploads: Arc<Mutex<HashMap<String, (String, PathBuf)>>>,
}
impl AppState {
fn new() -> anyhow::Result<Self> {
let base_dir = PathBuf::from("./mock_files");
// Create base directory if it doesn't exist
fs::create_dir_all(&base_dir)?;
// Create some sample files and directories
create_sample_files(&base_dir)?;
Ok(AppState {
base_dir,
uploads: Arc::new(Mutex::new(HashMap::new())),
})
}
/// Get a safe path within the base directory
fn get_safe_path(&self, user_path: &str) -> Option<PathBuf> {
let user_path = if user_path.is_empty() || user_path == "." {
"".to_string()
} else {
user_path.to_string()
};
// Normalize path and prevent directory traversal
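        // NOTE: this string scrub is a mock-only shortcut; a real server
        // should canonicalize the resolved path and verify it stays under
        // the root.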
let normalized = user_path.replace("..", "").replace("//", "/");
let safe_path = self.base_dir.join(normalized);
// Ensure the path is within base directory
if safe_path.starts_with(&self.base_dir) {
Some(safe_path)
} else {
None
}
}
}
/// Create sample files and directories for demo
fn create_sample_files(base_dir: &StdPath) -> anyhow::Result<()> {
let sample_dirs = ["documents", "images", "projects"];
let sample_files = [
("README.md", "# File Browser Demo\n\nThis is a sample file for testing the file browser component."),
("sample.txt", "This is a sample text file."),
("documents/report.md", "# Sample Report\n\nThis is a sample markdown report."),
("documents/notes.txt", "Sample notes file content."),
("images/placeholder.txt", "Placeholder for image files."),
("projects/project1.md", "# Project 1\n\nSample project documentation."),
("projects/config.json", r#"{"name": "sample-project", "version": "1.0.0"}"#),
];
// Create sample directories
for dir in &sample_dirs {
let dir_path = base_dir.join(dir);
fs::create_dir_all(dir_path)?;
}
// Create sample files
for (file_path, content) in &sample_files {
let full_path = base_dir.join(file_path);
if let Some(parent) = full_path.parent() {
fs::create_dir_all(parent)?;
}
fs::write(full_path, content)?;
}
Ok(())
}
/// Convert file metadata to FileItem
fn file_to_item(path: &StdPath, base_dir: &StdPath) -> anyhow::Result<FileItem> {
let metadata = fs::metadata(path)?;
let name = path.file_name()
.and_then(|n| n.to_str())
.unwrap_or("unknown")
.to_string();
let relative_path = path.strip_prefix(base_dir)
.map(|p| p.to_string_lossy().to_string())
.unwrap_or_else(|_| name.clone());
    let modified = metadata.modified()
        .ok()
        .map(|time| DateTime::<Utc>::from(time).format("%Y-%m-%d %H:%M:%S").to_string());
Ok(FileItem {
name,
path: relative_path,
is_directory: metadata.is_dir(),
size: if metadata.is_file() { Some(metadata.len()) } else { None },
modified,
hash: None,
})
}
/// List directory contents (root)
/// GET /files/list/
async fn list_root_directory(
Query(params): Query<ListQuery>,
axum::extract::State(state): axum::extract::State<AppState>,
) -> impl IntoResponse {
list_directory_impl("".to_string(), params, state).await
}
/// List directory contents with path
/// GET /files/list/<path>
async fn list_directory(
Path(path): Path<String>,
Query(params): Query<ListQuery>,
axum::extract::State(state): axum::extract::State<AppState>,
) -> impl IntoResponse {
list_directory_impl(path, params, state).await
}
/// Internal implementation for directory listing
async fn list_directory_impl(
path: String,
params: ListQuery,
state: AppState,
) -> impl IntoResponse {
let safe_path = match state.get_safe_path(&path) {
Some(p) => p,
None => {
return (
StatusCode::BAD_REQUEST,
Json(ErrorResponse { error: "Invalid path".to_string() }),
).into_response();
}
};
if !safe_path.exists() || !safe_path.is_dir() {
return (
StatusCode::NOT_FOUND,
Json(ErrorResponse { error: "Directory not found".to_string() }),
).into_response();
}
let mut contents = Vec::new();
if params.recursive.unwrap_or(false) {
// Recursive listing
for entry in WalkDir::new(&safe_path) {
if let Ok(entry) = entry {
if entry.path() != safe_path {
if let Ok(item) = file_to_item(entry.path(), &state.base_dir) {
contents.push(item);
}
}
}
}
} else {
// Non-recursive listing
if let Ok(entries) = fs::read_dir(&safe_path) {
for entry in entries.flatten() {
if let Ok(item) = file_to_item(&entry.path(), &state.base_dir) {
contents.push(item);
}
}
}
}
// Sort: directories first, then files, both alphabetically
contents.sort_by(|a, b| {
match (a.is_directory, b.is_directory) {
(true, false) => std::cmp::Ordering::Less,
(false, true) => std::cmp::Ordering::Greater,
_ => a.name.cmp(&b.name),
}
});
Json(ListResponse { contents }).into_response()
}
/// Create directory
/// POST /files/dirs/<path>
async fn create_directory(
Path(path): Path<String>,
axum::extract::State(state): axum::extract::State<AppState>,
) -> Response {
let safe_path = match state.get_safe_path(&path) {
Some(p) => p,
None => {
return (
StatusCode::BAD_REQUEST,
Json(ErrorResponse { error: "Invalid path".to_string() }),
).into_response();
}
};
match fs::create_dir_all(&safe_path) {
Ok(_) => {
info!("Created directory: {:?}", safe_path);
(
StatusCode::OK,
Json(SuccessResponse { message: "Directory created successfully".to_string() }),
).into_response()
}
Err(e) => {
warn!("Failed to create directory {:?}: {}", safe_path, e);
(
StatusCode::INTERNAL_SERVER_ERROR,
Json(ErrorResponse { error: "Failed to create directory".to_string() }),
).into_response()
}
}
}
/// Delete file or directory
/// DELETE /files/delete/<path>
async fn delete_item(
Path(path): Path<String>,
axum::extract::State(state): axum::extract::State<AppState>,
) -> Response {
let safe_path = match state.get_safe_path(&path) {
Some(p) => p,
None => {
return (
StatusCode::BAD_REQUEST,
Json(ErrorResponse { error: "Invalid path".to_string() }),
).into_response();
}
};
if !safe_path.exists() {
return (
StatusCode::NOT_FOUND,
Json(ErrorResponse { error: "File or directory not found".to_string() }),
).into_response();
}
let result = if safe_path.is_dir() {
fs::remove_dir_all(&safe_path)
} else {
fs::remove_file(&safe_path)
};
match result {
Ok(_) => {
info!("Deleted: {:?}", safe_path);
(
StatusCode::OK,
Json(SuccessResponse { message: "Deleted successfully".to_string() }),
).into_response()
}
Err(e) => {
warn!("Failed to delete {:?}: {}", safe_path, e);
(
StatusCode::INTERNAL_SERVER_ERROR,
Json(ErrorResponse { error: "Failed to delete".to_string() }),
).into_response()
}
}
}
/// Handle TUS upload creation
/// POST /files/upload
/// POST /files/upload/to/<path> (for a specific directory)
async fn create_upload(
headers: HeaderMap,
axum::extract::State(state): axum::extract::State<AppState>,
) -> impl IntoResponse {
create_upload_impl(headers, state, None).await
}
/// Handle TUS upload creation with path
/// POST /files/upload/to/<path>
async fn create_upload_with_path(
Path(path): Path<String>,
headers: HeaderMap,
axum::extract::State(state): axum::extract::State<AppState>,
) -> impl IntoResponse {
create_upload_impl(headers, state, Some(path)).await
}
/// Internal implementation for upload creation
async fn create_upload_impl(
headers: HeaderMap,
state: AppState,
target_path: Option<String>,
) -> impl IntoResponse {
let upload_id = uuid::Uuid::new_v4().to_string();
// Get filename from Upload-Metadata header (base64 encoded)
// TUS format: "filename <base64-encoded-filename>,type <base64-encoded-type>"
let filename = headers
.get("upload-metadata")
.and_then(|v| v.to_str().ok())
.and_then(|metadata| {
info!("Upload metadata received: {}", metadata);
// Parse TUS metadata format: "filename <base64>,type <base64>"
for pair in metadata.split(',') {
let parts: Vec<&str> = pair.trim().split_whitespace().collect();
if parts.len() == 2 && parts[0] == "filename" {
use base64::Engine;
if let Ok(decoded_bytes) = base64::engine::general_purpose::STANDARD.decode(parts[1]) {
if let Ok(decoded_filename) = String::from_utf8(decoded_bytes) {
info!("Extracted filename: {}", decoded_filename);
return Some(decoded_filename);
}
}
}
}
None
})
.unwrap_or_else(|| {
warn!("Could not extract filename from metadata, using fallback: upload_{}", upload_id);
format!("upload_{}", upload_id)
});
// Determine target directory - use provided path or current directory
let target_dir = if let Some(path) = target_path {
if path.is_empty() {
state.base_dir.clone()
} else {
state.base_dir.join(&path)
}
} else {
state.base_dir.clone()
};
// Create target directory if it doesn't exist
if let Err(e) = fs::create_dir_all(&target_dir) {
warn!("Failed to create target directory: {}", e);
}
// Store upload metadata with preserved filename
let upload_path = target_dir.join(&filename);
// Store the upload info for later use
if let Ok(mut uploads) = state.uploads.lock() {
uploads.insert(upload_id.clone(), (filename.clone(), upload_path));
}
let mut response_headers = HeaderMap::new();
response_headers.insert("Location", format!("/files/upload/{}", upload_id).parse().unwrap());
response_headers.insert("Tus-Resumable", "1.0.0".parse().unwrap());
info!("Created upload with ID: {} for file: {}", upload_id, filename);
(StatusCode::CREATED, response_headers, "")
}
/// Handle TUS upload data
/// PATCH /files/upload/<upload_id>
async fn tus_upload_chunk(
Path(upload_id): Path<String>,
axum::extract::State(state): axum::extract::State<AppState>,
_headers: HeaderMap,
body: axum::body::Bytes,
) -> impl IntoResponse {
// Get upload info from tracking
let upload_info = {
if let Ok(uploads) = state.uploads.lock() {
uploads.get(&upload_id).cloned()
} else {
None
}
};
let (filename, file_path) = match upload_info {
Some(info) => info,
None => {
warn!("Upload ID not found: {}", upload_id);
return (StatusCode::NOT_FOUND, HeaderMap::new(), "").into_response();
}
};
    // Write the entire request body to disk; this mock accepts the whole
    // file in a single PATCH rather than in resumable chunks
match std::fs::write(&file_path, &body) {
Ok(_) => {
info!("Successfully saved file: {} ({} bytes)", filename, body.len());
// Clean up upload tracking
if let Ok(mut uploads) = state.uploads.lock() {
uploads.remove(&upload_id);
}
let mut response_headers = HeaderMap::new();
response_headers.insert("Tus-Resumable", "1.0.0".parse().unwrap());
response_headers.insert("Upload-Offset", body.len().to_string().parse().unwrap());
(StatusCode::NO_CONTENT, response_headers, "").into_response()
}
Err(e) => {
warn!("Failed to save file {}: {}", filename, e);
(StatusCode::INTERNAL_SERVER_ERROR, HeaderMap::new(), "").into_response()
}
}
}
/// Download file
/// GET /files/download/<path>
async fn download_file(
Path(path): Path<String>,
axum::extract::State(state): axum::extract::State<AppState>,
) -> impl IntoResponse {
let safe_path = match state.get_safe_path(&path) {
Some(p) => p,
None => {
return (
StatusCode::BAD_REQUEST,
Json(ErrorResponse { error: "Invalid path".to_string() }),
).into_response();
}
};
if !safe_path.exists() || safe_path.is_dir() {
return (
StatusCode::NOT_FOUND,
Json(ErrorResponse { error: "File not found".to_string() }),
).into_response();
}
match fs::read(&safe_path) {
Ok(contents) => {
let mut headers = HeaderMap::new();
headers.insert(
"Content-Disposition",
format!("attachment; filename=\"{}\"",
safe_path.file_name().unwrap_or_default().to_string_lossy())
.parse().unwrap()
);
(StatusCode::OK, headers, contents).into_response()
}
Err(e) => {
warn!("Failed to read file {:?}: {}", safe_path, e);
(
StatusCode::INTERNAL_SERVER_ERROR,
Json(ErrorResponse { error: "Failed to read file".to_string() }),
).into_response()
}
}
}
/// Health check endpoint
async fn health_check() -> impl IntoResponse {
Json(serde_json::json!({
"status": "ok",
"message": "Mock file server is running"
}))
}
/// Root endpoint with API info
async fn root() -> impl IntoResponse {
Json(serde_json::json!({
"name": "Mock File Server",
"description": "A Rust mock server for testing the file browser component",
"endpoints": {
"GET /files/list/<path>": "List directory contents",
"POST /files/dirs/<path>": "Create directory",
"DELETE /files/delete/<path>": "Delete file/directory",
"POST /files/upload": "Upload file (TUS protocol)",
"PATCH /files/upload/<id>": "Upload file chunk",
"GET /files/download/<path>": "Download file",
"GET /health": "Health check"
}
}))
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
// Initialize tracing
tracing_subscriber::fmt::init();
// Initialize app state
let state = AppState::new()?;
info!("Base directory: {:?}", state.base_dir);
// Build the router
let app = Router::new()
.route("/", get(root))
.route("/health", get(health_check))
.route("/files/list/*path", get(list_directory))
.route("/files/list/", get(list_root_directory))
.route("/files/dirs/*path", post(create_directory))
.route("/files/delete/*path", delete(delete_item))
.route("/files/upload", post(create_upload))
.route("/files/upload/to/*path", post(create_upload_with_path))
.route("/files/upload/:upload_id", axum::routing::patch(tus_upload_chunk))
.route("/files/download/*path", get(download_file))
.layer(DefaultBodyLimit::max(500 * 1024 * 1024)) // 500MB limit for large file uploads
.layer(CorsLayer::permissive())
.with_state(state);
// Start the server
let port = std::env::var("PORT").unwrap_or_else(|_| "3001".to_string());
let addr = format!("0.0.0.0:{}", port);
info!("🚀 Mock File Server starting on http://{}", addr);
info!("📋 Available endpoints:");
info!(" GET /files/list/<path> - List directory contents");
info!(" POST /files/dirs/<path> - Create directory");
info!(" DELETE /files/delete/<path> - Delete file/directory");
info!(" POST /files/upload - Upload file (TUS)");
info!(" GET /files/download/<path> - Download file");
info!(" GET /health - Health check");
let listener = tokio::net::TcpListener::bind(&addr).await?;
axum::serve(listener, app).await?;
Ok(())
}