Compare commits

..

5 Commits

Author SHA1 Message Date
1f155d1bfb ... 2025-04-09 08:11:28 +02:00
b9df692a54 ... 2025-04-09 07:56:46 +02:00
44cbf20d7b ... 2025-04-09 07:54:37 +02:00
5e4dcbf77c ... 2025-04-09 07:11:38 +02:00
b93894632a ... 2025-04-09 06:20:35 +02:00
40 changed files with 3107 additions and 0 deletions

1 .gitignore vendored

@@ -14,3 +14,4 @@ Cargo.lock
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
doctreegolang/

18 doctree/Cargo.toml Normal file

@@ -0,0 +1,18 @@
[package]
name = "doctree"
version = "0.1.0"
edition = "2024"
[lib]
path = "src/lib.rs"
[dependencies]
walkdir = "2.3.3"
pulldown-cmark = "0.9.3"
thiserror = "1.0.40"
lazy_static = "1.4.0"
toml = "0.7.3"
serde = { version = "1.0", features = ["derive"] }
redis = { version = "0.23.0", features = ["tokio-comp"] }
tokio = { version = "1.28.0", features = ["full"] }
sal = { git = "https://git.ourworld.tf/herocode/sal.git", branch = "main" }

454 doctree/src/collection.rs Normal file

@@ -0,0 +1,454 @@
use std::path::{Path, PathBuf};
use walkdir::WalkDir;
use std::fs;
use crate::error::{DocTreeError, Result};
use crate::storage::RedisStorage;
use crate::utils::{name_fix, markdown_to_html, ensure_md_extension};
use crate::include::process_includes;
/// Collection represents a collection of markdown pages and files
#[derive(Clone)]
pub struct Collection {
/// Base path of the collection
pub path: PathBuf,
/// Name of the collection (namefixed)
pub name: String,
/// Redis storage backend
pub storage: RedisStorage,
}
/// Builder for Collection
pub struct CollectionBuilder {
/// Base path of the collection
path: PathBuf,
/// Name of the collection (namefixed)
name: String,
/// Redis storage backend
storage: Option<RedisStorage>,
}
impl Collection {
/// Create a new CollectionBuilder
///
/// # Arguments
///
/// * `path` - Base path of the collection
/// * `name` - Name of the collection
///
/// # Returns
///
/// A new CollectionBuilder
pub fn builder<P: AsRef<Path>>(path: P, name: &str) -> CollectionBuilder {
CollectionBuilder {
path: path.as_ref().to_path_buf(),
name: name_fix(name),
storage: None,
}
}
/// Scan walks over the path and finds all files and .md files
/// It stores the relative positions in Redis
///
/// # Returns
///
/// Ok(()) on success or an error
pub fn scan(&self) -> Result<()> {
println!("DEBUG: Scanning collection '{}' at path {:?}", self.name, self.path);
// Delete existing collection data if any
println!("DEBUG: Deleting existing collection data from Redis key 'collections:{}'", self.name);
self.storage.delete_collection(&self.name)?;
// Store the collection's path in Redis
println!("DEBUG: Storing collection path in Redis key 'collections:{}:path'", self.name);
self.storage.store_collection_path(&self.name, &self.path.to_string_lossy())?;
// Walk through the directory
let walker = WalkDir::new(&self.path);
for entry_result in walker {
// Handle entry errors
let entry = match entry_result {
Ok(entry) => entry,
Err(e) => {
// Log the error and continue
eprintln!("Error walking directory: {}", e);
continue;
}
};
// Skip directories
if entry.file_type().is_dir() {
continue;
}
// Skip files that start with a dot (.)
let file_name = entry.file_name().to_string_lossy();
if file_name.starts_with(".") {
continue;
}
// Get the relative path from the base path
let rel_path = match entry.path().strip_prefix(&self.path) {
Ok(path) => path,
Err(_) => {
// Log the error and continue
eprintln!("Failed to get relative path for: {:?}", entry.path());
continue;
}
};
// Get the filename and apply namefix
let filename = entry.file_name().to_string_lossy().to_string();
let namefixed_filename = name_fix(&filename);
// Determine if this is a document (markdown file) or an image
let is_markdown = filename.to_lowercase().ends_with(".md");
let is_image = filename.to_lowercase().ends_with(".png") ||
filename.to_lowercase().ends_with(".jpg") ||
filename.to_lowercase().ends_with(".jpeg") ||
filename.to_lowercase().ends_with(".gif") ||
filename.to_lowercase().ends_with(".svg");
let file_type = if is_markdown {
"document"
} else if is_image {
"image"
} else {
"file"
};
// Store in Redis using the namefixed filename as the key
// Store the original relative path to preserve case and special characters
println!("DEBUG: Storing {} '{}' in Redis key 'collections:{}' with key '{}' and value '{}'",
file_type, filename, self.name, namefixed_filename, rel_path.to_string_lossy());
self.storage.store_collection_entry(
&self.name,
&namefixed_filename,
&rel_path.to_string_lossy()
)?;
}
Ok(())
}
/// Get a page by name and return its markdown content
///
/// # Arguments
///
/// * `page_name` - Name of the page
///
/// # Returns
///
/// The page content or an error
pub fn page_get(&self, page_name: &str) -> Result<String> {
// Apply namefix to the page name
let namefixed_page_name = name_fix(page_name);
// Ensure it has .md extension
let namefixed_page_name = ensure_md_extension(&namefixed_page_name);
// Get the relative path from Redis
let rel_path = self.storage.get_collection_entry(&self.name, &namefixed_page_name)
.map_err(|_| DocTreeError::PageNotFound(page_name.to_string()))?;
// Check if the path is valid
if self.path.as_os_str().is_empty() {
// If the path is empty, we're working with a collection loaded from Redis
// Return an error since the actual file path is not available
return Err(DocTreeError::IoError(std::io::Error::new(
std::io::ErrorKind::NotFound,
format!("File path not available for {} in collection {}", page_name, self.name)
)));
}
// Read the file
let full_path = self.path.join(rel_path);
let content = fs::read_to_string(full_path)
.map_err(DocTreeError::IoError)?;
// Skip include processing at this level to avoid infinite recursion
// Include processing will be done at the higher level
Ok(content)
}
/// Create or update a page in the collection
///
/// # Arguments
///
/// * `page_name` - Name of the page
/// * `content` - Content of the page
///
/// # Returns
///
/// Ok(()) on success or an error
pub fn page_set(&self, page_name: &str, content: &str) -> Result<()> {
// Apply namefix to the page name
let namefixed_page_name = name_fix(page_name);
// Ensure it has .md extension
let namefixed_page_name = ensure_md_extension(&namefixed_page_name);
// Create the full path
let full_path = self.path.join(&namefixed_page_name);
// Create directories if needed
if let Some(parent) = full_path.parent() {
fs::create_dir_all(parent).map_err(DocTreeError::IoError)?;
}
// Write content to file
fs::write(&full_path, content).map_err(DocTreeError::IoError)?;
// Update Redis
self.storage.store_collection_entry(&self.name, &namefixed_page_name, &namefixed_page_name)?;
Ok(())
}
/// Delete a page from the collection
///
/// # Arguments
///
/// * `page_name` - Name of the page
///
/// # Returns
///
/// Ok(()) on success or an error
pub fn page_delete(&self, page_name: &str) -> Result<()> {
// Apply namefix to the page name
let namefixed_page_name = name_fix(page_name);
// Ensure it has .md extension
let namefixed_page_name = ensure_md_extension(&namefixed_page_name);
// Get the relative path from Redis
let rel_path = self.storage.get_collection_entry(&self.name, &namefixed_page_name)
.map_err(|_| DocTreeError::PageNotFound(page_name.to_string()))?;
// Delete the file
let full_path = self.path.join(rel_path);
fs::remove_file(full_path).map_err(DocTreeError::IoError)?;
// Remove from Redis
self.storage.delete_collection_entry(&self.name, &namefixed_page_name)?;
Ok(())
}
/// List all pages in the collection
///
/// # Returns
///
/// A vector of page names or an error
pub fn page_list(&self) -> Result<Vec<String>> {
// Get all keys from Redis
let keys = self.storage.list_collection_entries(&self.name)?;
// Filter to only include .md files
let pages = keys.into_iter()
.filter(|key| key.ends_with(".md"))
.collect();
Ok(pages)
}
/// Get the URL for a file
///
/// # Arguments
///
/// * `file_name` - Name of the file
///
/// # Returns
///
/// The URL for the file or an error
pub fn file_get_url(&self, file_name: &str) -> Result<String> {
// Apply namefix to the file name
let namefixed_file_name = name_fix(file_name);
// Get the relative path from Redis
let rel_path = self.storage.get_collection_entry(&self.name, &namefixed_file_name)
.map_err(|_| DocTreeError::FileNotFound(file_name.to_string()))?;
// Construct a URL for the file
let url = format!("/collections/{}/files/{}", self.name, rel_path);
Ok(url)
}
/// Add or update a file in the collection
///
/// # Arguments
///
/// * `file_name` - Name of the file
/// * `content` - Content of the file
///
/// # Returns
///
/// Ok(()) on success or an error
pub fn file_set(&self, file_name: &str, content: &[u8]) -> Result<()> {
// Apply namefix to the file name
let namefixed_file_name = name_fix(file_name);
// Create the full path
let full_path = self.path.join(&namefixed_file_name);
// Create directories if needed
if let Some(parent) = full_path.parent() {
fs::create_dir_all(parent).map_err(DocTreeError::IoError)?;
}
// Write content to file
fs::write(&full_path, content).map_err(DocTreeError::IoError)?;
// Update Redis
self.storage.store_collection_entry(&self.name, &namefixed_file_name, &namefixed_file_name)?;
Ok(())
}
/// Delete a file from the collection
///
/// # Arguments
///
/// * `file_name` - Name of the file
///
/// # Returns
///
/// Ok(()) on success or an error
pub fn file_delete(&self, file_name: &str) -> Result<()> {
// Apply namefix to the file name
let namefixed_file_name = name_fix(file_name);
// Get the relative path from Redis
let rel_path = self.storage.get_collection_entry(&self.name, &namefixed_file_name)
.map_err(|_| DocTreeError::FileNotFound(file_name.to_string()))?;
// Delete the file
let full_path = self.path.join(rel_path);
fs::remove_file(full_path).map_err(DocTreeError::IoError)?;
// Remove from Redis
self.storage.delete_collection_entry(&self.name, &namefixed_file_name)?;
Ok(())
}
/// List all files (non-markdown) in the collection
///
/// # Returns
///
/// A vector of file names or an error
pub fn file_list(&self) -> Result<Vec<String>> {
// Get all keys from Redis
let keys = self.storage.list_collection_entries(&self.name)?;
// Filter to exclude .md files
let files = keys.into_iter()
.filter(|key| !key.ends_with(".md"))
.collect();
Ok(files)
}
/// Get the relative path of a page in the collection
///
/// # Arguments
///
/// * `page_name` - Name of the page
///
/// # Returns
///
/// The relative path of the page or an error
pub fn page_get_path(&self, page_name: &str) -> Result<String> {
// Apply namefix to the page name
let namefixed_page_name = name_fix(page_name);
// Ensure it has .md extension
let namefixed_page_name = ensure_md_extension(&namefixed_page_name);
// Get the relative path from Redis
self.storage.get_collection_entry(&self.name, &namefixed_page_name)
.map_err(|_| DocTreeError::PageNotFound(page_name.to_string()))
}
/// Get a page by name and return its HTML content
///
/// # Arguments
///
/// * `page_name` - Name of the page
/// * `doctree` - Optional DocTree instance for include processing
///
/// # Returns
///
/// The HTML content of the page or an error
pub fn page_get_html(&self, page_name: &str, doctree: Option<&crate::doctree::DocTree>) -> Result<String> {
// Get the markdown content
let markdown = self.page_get(page_name)?;
// Process includes if doctree is provided
let processed_markdown = if let Some(dt) = doctree {
process_includes(&markdown, &self.name, dt)?
} else {
markdown
};
// Convert markdown to HTML
let html = markdown_to_html(&processed_markdown);
Ok(html)
}
/// Get information about the Collection
///
/// # Returns
///
/// A map of information
pub fn info(&self) -> std::collections::HashMap<String, String> {
let mut info = std::collections::HashMap::new();
info.insert("name".to_string(), self.name.clone());
info.insert("path".to_string(), self.path.to_string_lossy().to_string());
info
}
}
impl CollectionBuilder {
/// Set the storage backend
///
/// # Arguments
///
/// * `storage` - Redis storage backend
///
/// # Returns
///
/// Self for method chaining
pub fn with_storage(mut self, storage: RedisStorage) -> Self {
self.storage = Some(storage);
self
}
/// Build the Collection
///
/// # Returns
///
/// A new Collection or an error
pub fn build(self) -> Result<Collection> {
let storage = self.storage.ok_or_else(|| {
DocTreeError::MissingParameter("storage".to_string())
})?;
let collection = Collection {
path: self.path,
name: self.name,
storage,
};
Ok(collection)
}
}
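
Taken together, the builder plus the page/file methods give a small CRUD surface over a single directory backed by Redis. A minimal usage sketch, assuming a Redis server at redis://localhost:6379 and a hypothetical ./docs directory:

```rust
use doctree::{Collection, RedisStorage, Result};

fn demo() -> Result<()> {
    let storage = RedisStorage::new("redis://localhost:6379")?;
    // Build and index a collection rooted at ./docs (hypothetical path)
    let collection = Collection::builder("./docs", "My Docs")
        .with_storage(storage)
        .build()?;
    collection.scan()?;
    // Lookups are name-fixed: "Getting Started" -> "getting_started.md"
    let markdown = collection.page_get("Getting Started")?;
    println!("{}", markdown);
    Ok(())
}
```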

721 doctree/src/doctree.rs Normal file

@@ -0,0 +1,721 @@
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::fs;
use serde::Deserialize;
use crate::collection::Collection;
use crate::error::{DocTreeError, Result};
use crate::storage::RedisStorage;
use crate::include::process_includes;
use crate::utils::name_fix;
/// Configuration for a collection from a .collection file
#[derive(Deserialize, Default, Debug)]
struct CollectionConfig {
/// Optional name of the collection
name: Option<String>,
// Add other configuration options as needed
}
// Global variable to track the current collection name
// This is for compatibility with the Go implementation
lazy_static::lazy_static! {
static ref CURRENT_COLLECTION_NAME: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
}
/// DocTree represents a manager for multiple collections
pub struct DocTree {
/// Map of collections by name
pub collections: HashMap<String, Collection>,
/// Default collection name
pub default_collection: Option<String>,
/// Redis storage backend
storage: RedisStorage,
/// For backward compatibility
pub name: String,
/// For backward compatibility
pub path: PathBuf,
}
/// Builder for DocTree
pub struct DocTreeBuilder {
/// Map of collections by name
collections: HashMap<String, Collection>,
/// Default collection name
default_collection: Option<String>,
/// Redis storage backend
storage: Option<RedisStorage>,
/// For backward compatibility
name: Option<String>,
/// For backward compatibility
path: Option<PathBuf>,
}
impl DocTree {
/// Create a new DocTreeBuilder
///
/// # Returns
///
/// A new DocTreeBuilder
pub fn builder() -> DocTreeBuilder {
DocTreeBuilder {
collections: HashMap::new(),
default_collection: None,
storage: None,
name: None,
path: None,
}
}
/// Add a collection to the DocTree
///
/// # Arguments
///
/// * `path` - Base path of the collection
/// * `name` - Name of the collection
///
/// # Returns
///
/// The added collection or an error
pub fn add_collection<P: AsRef<Path>>(&mut self, path: P, name: &str) -> Result<&Collection> {
// Create a new collection
let namefixed = name_fix(name);
let collection = Collection::builder(path, &namefixed)
.with_storage(self.storage.clone())
.build()?;
// Scan the collection
collection.scan()?;
// Add to the collections map
self.collections.insert(collection.name.clone(), collection);
// Return a reference to the added collection
self.collections.get(&namefixed).ok_or_else(|| {
DocTreeError::CollectionNotFound(namefixed.clone())
})
}
/// Get a collection by name
///
/// # Arguments
///
/// * `name` - Name of the collection
///
/// # Returns
///
/// The collection or an error
pub fn get_collection(&self, name: &str) -> Result<&Collection> {
// For compatibility with tests, apply namefix
let namefixed = name_fix(name);
// Check if the collection exists
self.collections.get(&namefixed).ok_or_else(|| {
DocTreeError::CollectionNotFound(name.to_string())
})
}
/// Delete a collection from the DocTree
///
/// # Arguments
///
/// * `name` - Name of the collection
///
/// # Returns
///
/// Ok(()) on success or an error
pub fn delete_collection(&mut self, name: &str) -> Result<()> {
// For compatibility with tests, apply namefix
let namefixed = name_fix(name);
// Check if the collection exists
if !self.collections.contains_key(&namefixed) {
return Err(DocTreeError::CollectionNotFound(name.to_string()));
}
// Delete from Redis
self.storage.delete_collection(&namefixed)?;
// Remove from the collections map
self.collections.remove(&namefixed);
Ok(())
}
/// Delete all collections from the DocTree and Redis
///
/// # Returns
///
/// Ok(()) on success or an error
pub fn delete_all_collections(&mut self) -> Result<()> {
// Delete all collections from Redis
self.storage.delete_all_collections()?;
// Clear the collections map
self.collections.clear();
// Reset the default collection
self.default_collection = None;
Ok(())
}
/// List all collections
///
/// # Returns
///
/// A vector of collection names
pub fn list_collections(&self) -> Vec<String> {
// First, try to get collections from the in-memory map
let mut collections = self.collections.keys().cloned().collect::<Vec<String>>();
// If no collections are found, try to get them from Redis
if collections.is_empty() {
// Get all collection keys from Redis
if let Ok(keys) = self.storage.list_all_collections() {
collections = keys;
}
}
collections
}
/// Load a collection from Redis
///
/// # Arguments
///
/// * `name` - Name of the collection
///
/// # Returns
///
/// Ok(()) on success or an error
pub fn load_collection(&mut self, name: &str) -> Result<()> {
// Check if the collection exists in Redis
if !self.storage.collection_exists(name)? {
return Err(DocTreeError::CollectionNotFound(name.to_string()));
}
// Try to get the collection's path from Redis
let path = match self.storage.get_collection_path(name) {
Ok(path_str) => {
println!("DEBUG: Found collection path in Redis: {}", path_str);
PathBuf::from(path_str)
},
Err(e) => {
println!("DEBUG: Could not retrieve collection path from Redis: {}", e);
PathBuf::new() // Fallback to empty path if not found
}
};
// Create a new collection
let collection = Collection {
path,
name: name.to_string(),
storage: self.storage.clone(),
};
// Add to the collections map
self.collections.insert(name.to_string(), collection);
Ok(())
}
/// Load all collections from Redis
///
/// # Returns
///
/// Ok(()) on success or an error
pub fn load_collections_from_redis(&mut self) -> Result<()> {
// Get all collection names from Redis
let collections = self.storage.list_all_collections()?;
// Load each collection
for name in collections {
// Skip if already loaded
if self.collections.contains_key(&name) {
continue;
}
// Try to get the collection's path from Redis
let path = match self.storage.get_collection_path(&name) {
Ok(path_str) => {
println!("DEBUG: Found collection path in Redis: {}", path_str);
PathBuf::from(path_str)
},
Err(e) => {
println!("DEBUG: Could not retrieve collection path from Redis: {}", e);
PathBuf::new() // Fallback to empty path if not found
}
};
// Create a new collection
let collection = Collection {
path,
name: name.clone(),
storage: self.storage.clone(),
};
// Add to the collections map
self.collections.insert(name, collection);
}
Ok(())
}
/// Get a page by name from a specific collection
///
/// # Arguments
///
/// * `collection_name` - Name of the collection (optional)
/// * `page_name` - Name of the page
///
/// # Returns
///
/// The page content or an error
pub fn page_get(&self, collection_name: Option<&str>, page_name: &str) -> Result<String> {
let (collection_name, page_name) = self.resolve_collection_and_page(collection_name, page_name)?;
// Get the collection
let collection = self.get_collection(&collection_name)?;
// Get the page content
let content = collection.page_get(page_name)?;
// Process includes
let processed_content = process_includes(&content, &collection_name, self)?;
Ok(processed_content)
}
/// Get a page by name from a specific collection and return its HTML content
///
/// # Arguments
///
/// * `collection_name` - Name of the collection (optional)
/// * `page_name` - Name of the page
///
/// # Returns
///
/// The HTML content or an error
pub fn page_get_html(&self, collection_name: Option<&str>, page_name: &str) -> Result<String> {
let (collection_name, page_name) = self.resolve_collection_and_page(collection_name, page_name)?;
// Get the collection
let collection = self.get_collection(&collection_name)?;
// Get the HTML
collection.page_get_html(page_name, Some(self))
}
/// Get the URL for a file in a specific collection
///
/// # Arguments
///
/// * `collection_name` - Name of the collection (optional)
/// * `file_name` - Name of the file
///
/// # Returns
///
/// The URL for the file or an error
pub fn file_get_url(&self, collection_name: Option<&str>, file_name: &str) -> Result<String> {
let (collection_name, file_name) = self.resolve_collection_and_page(collection_name, file_name)?;
// Get the collection
let collection = self.get_collection(&collection_name)?;
// Get the URL
collection.file_get_url(file_name)
}
/// Get the path to a page in the default collection
///
/// # Arguments
///
/// * `page_name` - Name of the page
///
/// # Returns
///
/// The path to the page or an error
pub fn page_get_path(&self, page_name: &str) -> Result<String> {
// Check if a default collection is set
let default_collection = self.default_collection.as_ref().ok_or_else(|| {
DocTreeError::NoDefaultCollection
})?;
// Get the collection
let collection = self.get_collection(default_collection)?;
// Get the path
collection.page_get_path(page_name)
}
/// Get information about the DocTree
///
/// # Returns
///
/// A map of information
pub fn info(&self) -> HashMap<String, String> {
let mut info = HashMap::new();
info.insert("name".to_string(), self.name.clone());
info.insert("path".to_string(), self.path.to_string_lossy().to_string());
info.insert("collections".to_string(), self.collections.len().to_string());
info
}
/// Scan the default collection
///
/// # Returns
///
/// Ok(()) on success or an error
pub fn scan(&self) -> Result<()> {
// Check if a default collection is set
let default_collection = self.default_collection.as_ref().ok_or_else(|| {
DocTreeError::NoDefaultCollection
})?;
// Get the collection
let collection = self.get_collection(default_collection)?;
// Scan the collection
collection.scan()
}
/// Resolve collection and page names
///
/// # Arguments
///
/// * `collection_name` - Name of the collection (optional)
/// * `page_name` - Name of the page
///
/// # Returns
///
/// A tuple of (collection_name, page_name) or an error
fn resolve_collection_and_page<'a>(&self, collection_name: Option<&'a str>, page_name: &'a str) -> Result<(String, &'a str)> {
match collection_name {
Some(name) => Ok((name_fix(name), page_name)),
None => {
// Use the default collection
let default_collection = self.default_collection.as_ref().ok_or_else(|| {
DocTreeError::NoDefaultCollection
})?;
Ok((default_collection.clone(), page_name))
}
}
}
/// Recursively scan directories for .collection files and add them as collections
///
/// # Arguments
///
/// * `root_path` - The root path to start scanning from
///
/// # Returns
///
/// Ok(()) on success or an error
pub fn scan_collections<P: AsRef<Path>>(&mut self, root_path: P) -> Result<()> {
let root_path = root_path.as_ref();
println!("DEBUG: Scanning for collections in directory: {:?}", root_path);
// Walk through the directory tree
for entry in walkdir::WalkDir::new(root_path).follow_links(true) {
let entry = match entry {
Ok(entry) => entry,
Err(e) => {
eprintln!("Error walking directory: {}", e);
continue;
}
};
// Skip directories and files that start with a dot (.)
let file_name = entry.file_name().to_string_lossy();
if file_name.starts_with(".") {
continue;
}
// Skip non-directories
if !entry.file_type().is_dir() {
continue;
}
// Check if this directory contains a .collection file
let collection_file_path = entry.path().join(".collection");
if collection_file_path.exists() {
// Found a collection directory
println!("DEBUG: Found .collection file at: {:?}", collection_file_path);
let dir_path = entry.path();
// Get the directory name as a fallback collection name
let dir_name = dir_path.file_name()
.and_then(|name| name.to_str())
.unwrap_or("unnamed");
// Try to read and parse the .collection file
let collection_name = match fs::read_to_string(&collection_file_path) {
Ok(content) => {
if content.trim().is_empty() {
// Empty file, use directory name (name_fixed)
dir_name.to_string() // name_fix is applied below
} else {
// Parse as TOML
match toml::from_str::<CollectionConfig>(&content) {
Ok(config) => {
// Use the name from config if available, otherwise use directory name
config.name.unwrap_or_else(|| dir_name.to_string())
},
Err(e) => {
eprintln!("Error parsing .collection file at {:?}: {}", collection_file_path, e);
dir_name.to_string()
}
}
}
},
Err(e) => {
eprintln!("Error reading .collection file at {:?}: {}", collection_file_path, e);
dir_name.to_string()
}
};
// Apply name_fix to the collection name
let namefixed_collection_name = name_fix(&collection_name);
// Add the collection to the DocTree
println!("DEBUG: Adding collection '{}' from directory {:?}", namefixed_collection_name, dir_path);
match self.add_collection(dir_path, &namefixed_collection_name) {
Ok(collection) => {
println!("DEBUG: Successfully added collection '{}' from {:?}", namefixed_collection_name, dir_path);
println!("DEBUG: Collection stored in Redis key 'collections:{}'", collection.name);
// Count documents and images
let docs = collection.page_list().unwrap_or_default();
let files = collection.file_list().unwrap_or_default();
let images = files.iter().filter(|f|
f.ends_with(".png") || f.ends_with(".jpg") ||
f.ends_with(".jpeg") || f.ends_with(".gif") ||
f.ends_with(".svg")
).count();
println!("DEBUG: Collection '{}' contains {} documents and {} images",
namefixed_collection_name, docs.len(), images);
},
Err(e) => {
eprintln!("Error adding collection '{}' from {:?}: {}", namefixed_collection_name, dir_path, e);
}
}
}
}
Ok(())
}
}
impl DocTreeBuilder {
/// Set the storage backend
///
/// # Arguments
///
/// * `storage` - Redis storage backend
///
/// # Returns
///
/// Self for method chaining
pub fn with_storage(mut self, storage: RedisStorage) -> Self {
self.storage = Some(storage);
self
}
/// Add a collection
///
/// # Arguments
///
/// * `path` - Base path of the collection
/// * `name` - Name of the collection
///
/// # Returns
///
/// Self for method chaining or an error
pub fn with_collection<P: AsRef<Path>>(mut self, path: P, name: &str) -> Result<Self> {
// Ensure storage is set
let storage = self.storage.as_ref().ok_or_else(|| {
DocTreeError::MissingParameter("storage".to_string())
})?;
// Create a new collection
let namefixed = name_fix(name);
let collection = Collection::builder(path.as_ref(), &namefixed)
.with_storage(storage.clone())
.build()?;
// Scan the collection
collection.scan()?;
// Add to the collections map
self.collections.insert(collection.name.clone(), collection);
// For backward compatibility
if self.name.is_none() {
self.name = Some(namefixed.clone());
}
if self.path.is_none() {
self.path = Some(path.as_ref().to_path_buf());
}
Ok(self)
}
/// Set the default collection
///
/// # Arguments
///
/// * `name` - Name of the default collection
///
/// # Returns
///
/// Self for method chaining
pub fn with_default_collection(mut self, name: &str) -> Self {
self.default_collection = Some(name_fix(name));
self
}
/// Scan for collections in the given root path
///
/// # Arguments
///
/// * `root_path` - The root path to scan for collections
///
/// # Returns
///
/// Self for method chaining or an error
pub fn scan_collections<P: AsRef<Path>>(self, root_path: P) -> Result<Self> {
// Ensure storage is set
let storage = self.storage.as_ref().ok_or_else(|| {
DocTreeError::MissingParameter("storage".to_string())
})?;
// Create a temporary DocTree to scan collections
let mut temp_doctree = DocTree {
collections: HashMap::new(),
default_collection: None,
storage: storage.clone(),
name: self.name.clone().unwrap_or_default(),
path: self.path.clone().unwrap_or_else(|| PathBuf::from("")),
};
// Scan for collections
temp_doctree.scan_collections(root_path)?;
// Create a new builder with the scanned collections
let mut new_builder = self;
for (name, collection) in temp_doctree.collections {
new_builder.collections.insert(name.clone(), collection);
// If no default collection is set, use the first one found
if new_builder.default_collection.is_none() {
new_builder.default_collection = Some(name);
}
}
Ok(new_builder)
}
/// Build the DocTree
///
/// # Returns
///
/// A new DocTree or an error
pub fn build(self) -> Result<DocTree> {
// Ensure storage is set
let storage = self.storage.ok_or_else(|| {
DocTreeError::MissingParameter("storage".to_string())
})?;
// Create the DocTree
let mut doctree = DocTree {
collections: self.collections,
default_collection: self.default_collection,
storage: storage.clone(),
name: self.name.unwrap_or_default(),
path: self.path.unwrap_or_else(|| PathBuf::from("")),
};
// Set the global current collection name if a default collection is set
if let Some(default_collection) = &doctree.default_collection {
let mut current_collection_name = CURRENT_COLLECTION_NAME.lock().unwrap();
*current_collection_name = Some(default_collection.clone());
}
// Load all collections from Redis
doctree.load_collections_from_redis()?;
Ok(doctree)
}
}
/// Create a new DocTree instance
///
/// For backward compatibility, it also accepts path and name parameters
/// to create a DocTree with a single collection
///
/// # Arguments
///
/// * `args` - Optional path and name for backward compatibility
///
/// # Returns
///
/// A new DocTree or an error
pub fn new(args: &[&str]) -> Result<DocTree> {
let storage = RedisStorage::new("redis://localhost:6379")?;
let mut builder = DocTree::builder().with_storage(storage);
// For backward compatibility with existing code
if args.len() == 2 {
let path = args[0];
let name = args[1];
// Apply namefix for compatibility with tests
let namefixed = name_fix(name);
// Add the collection
builder = builder.with_collection(path, &namefixed)?;
// Set the default collection
builder = builder.with_default_collection(&namefixed);
}
builder.build()
}
/// Create a new DocTree by scanning a directory for collections
///
/// # Arguments
///
/// * `root_path` - The root path to scan for collections
///
/// # Returns
///
/// A new DocTree or an error
pub fn from_directory<P: AsRef<Path>>(root_path: P) -> Result<DocTree> {
let storage = RedisStorage::new("redis://localhost:6379")?;
DocTree::builder()
.with_storage(storage)
.scan_collections(root_path)?
.build()
}
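
Two entry points exist: `new(&[path, name])` for the single-collection legacy flow and `from_directory` for recursive discovery. Both assume a Redis server at redis://localhost:6379. A sketch of the discovery flow, with a hypothetical ./wiki root and page name:

```rust
use doctree::{from_directory, Result};

fn main() -> Result<()> {
    // Discover every directory under ./wiki that contains a .collection file
    let doctree = from_directory("./wiki")?;
    for name in doctree.list_collections() {
        println!("collection: {}", name);
    }
    // Render a page from the default collection (the first one discovered)
    let html = doctree.page_get_html(None, "index")?;
    println!("{}", html);
    Ok(())
}
```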

48 doctree/src/error.rs Normal file

@@ -0,0 +1,48 @@
use thiserror::Error;
/// Custom error type for the doctree library
#[derive(Error, Debug)]
pub enum DocTreeError {
/// IO error
#[error("IO error: {0}")]
IoError(#[from] std::io::Error),
/// WalkDir error
#[error("WalkDir error: {0}")]
WalkDirError(String),
/// Collection not found
#[error("Collection not found: {0}")]
CollectionNotFound(String),
/// Page not found
#[error("Page not found: {0}")]
PageNotFound(String),
/// File not found
#[error("File not found: {0}")]
FileNotFound(String),
/// Invalid include directive
#[error("Invalid include directive: {0}")]
InvalidIncludeDirective(String),
/// No default collection set
#[error("No default collection set")]
NoDefaultCollection,
/// Invalid number of arguments
#[error("Invalid number of arguments")]
InvalidArgumentCount,
/// Missing required parameter
#[error("Missing required parameter: {0}")]
MissingParameter(String),
/// Redis error
#[error("Redis error: {0}")]
RedisError(String),
}
/// Result type alias for doctree operations
pub type Result<T> = std::result::Result<T, DocTreeError>;
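
Because `IoError` carries `#[from]`, `std::io::Error` converts automatically with `?`; Redis errors are stringified by hand at each call site instead. A small sketch of both sides (function names hypothetical):

```rust
use doctree::{DocTreeError, Result};

// std::io::Error converts into DocTreeError::IoError through #[from],
// so a bare `?` works on fs calls.
fn read_raw(path: &str) -> Result<String> {
    Ok(std::fs::read_to_string(path)?)
}

fn describe(err: &DocTreeError) -> String {
    match err {
        DocTreeError::PageNotFound(page) => format!("missing page: {}", page),
        other => other.to_string(), // Display impl generated by #[error(...)]
    }
}
```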

178 doctree/src/include.rs Normal file

@@ -0,0 +1,178 @@
use crate::doctree::DocTree;
use crate::error::{DocTreeError, Result};
use crate::utils::trim_spaces_and_quotes;
/// Process includes in markdown content
///
/// # Arguments
///
/// * `content` - The markdown content to process
/// * `current_collection_name` - The name of the current collection
/// * `doctree` - The DocTree instance
///
/// # Returns
///
/// The processed content or an error
pub fn process_includes(content: &str, current_collection_name: &str, doctree: &DocTree) -> Result<String> {
// Find all include directives
let lines: Vec<&str> = content.split('\n').collect();
let mut result = Vec::with_capacity(lines.len());
for line in lines {
match parse_include_line(line) {
Ok((Some(c), Some(p))) => {
// Both collection and page specified
match handle_include(&p, &c, doctree) {
Ok(include_content) => {
// Process any nested includes in the included content
match process_includes(&include_content, &c, doctree) {
Ok(processed_include_content) => {
result.push(processed_include_content);
},
Err(e) => {
result.push(format!(">>ERROR: Failed to process nested includes: {}", e));
}
}
},
Err(e) => {
result.push(format!(">>ERROR: {}", e));
}
}
},
Ok((Some(_), None)) => {
// Invalid case: collection specified but no page
result.push(format!(">>ERROR: Invalid include directive: collection specified but no page name"));
},
Ok((None, Some(p))) => {
// Only page specified, use current collection
match handle_include(&p, current_collection_name, doctree) {
Ok(include_content) => {
// Process any nested includes in the included content
match process_includes(&include_content, current_collection_name, doctree) {
Ok(processed_include_content) => {
result.push(processed_include_content);
},
Err(e) => {
result.push(format!(">>ERROR: Failed to process nested includes: {}", e));
}
}
},
Err(e) => {
result.push(format!(">>ERROR: {}", e));
}
}
},
Ok((None, None)) => {
// Not an include directive, keep the line
result.push(line.to_string());
},
Err(e) => {
// Error parsing include directive
result.push(format!(">>ERROR: Failed to process include directive: {}", e));
}
}
}
Ok(result.join("\n"))
}
/// Parse an include directive line
///
/// # Arguments
///
/// * `line` - The line to parse
///
/// # Returns
///
/// A tuple of (collection_name, page_name) or an error
///
/// Supports:
/// - !!include collectionname:'pagename'
/// - !!include collectionname:'pagename.md'
/// - !!include 'pagename'
/// - !!include collectionname:pagename
/// - !!include collectionname:pagename.md
/// - !!include name:'pagename'
/// - !!include pagename
fn parse_include_line(line: &str) -> Result<(Option<String>, Option<String>)> {
// Check if the line contains an include directive
if !line.contains("!!include") {
return Ok((None, None));
}
// Extract the part after !!include
let parts: Vec<&str> = line.splitn(2, "!!include").collect();
if parts.len() != 2 {
return Err(DocTreeError::InvalidIncludeDirective(line.to_string()));
}
// Trim spaces and check if the include part is empty
let include_text = trim_spaces_and_quotes(parts[1]);
if include_text.is_empty() {
return Err(DocTreeError::InvalidIncludeDirective(line.to_string()));
}
// Remove name: prefix if present
let include_text = if include_text.starts_with("name:") {
let text = include_text.trim_start_matches("name:").trim();
if text.is_empty() {
return Err(DocTreeError::InvalidIncludeDirective(
format!("empty page name after 'name:' prefix: {}", line)
));
}
text.to_string()
} else {
include_text
};
// Check if it contains a collection reference (has a colon)
if include_text.contains(':') {
let parts: Vec<&str> = include_text.splitn(2, ':').collect();
if parts.len() != 2 {
return Err(DocTreeError::InvalidIncludeDirective(
format!("malformed collection reference: {}", include_text)
));
}
let collection_name = parts[0].trim();
let page_name = trim_spaces_and_quotes(parts[1]);
if collection_name.is_empty() {
return Err(DocTreeError::InvalidIncludeDirective(
format!("empty collection name in include directive: {}", line)
));
}
if page_name.is_empty() {
return Err(DocTreeError::InvalidIncludeDirective(
format!("empty page name in include directive: {}", line)
));
}
Ok((Some(collection_name.to_string()), Some(page_name)))
} else {
// No collection specified, just a page name
Ok((None, Some(include_text)))
}
}
/// Handle an include directive
///
/// # Arguments
///
/// * `page_name` - The name of the page to include
/// * `collection_name` - The name of the collection
/// * `doctree` - The DocTree instance
///
/// # Returns
///
/// The included content or an error
fn handle_include(page_name: &str, collection_name: &str, doctree: &DocTree) -> Result<String> {
// Get the collection
let collection = doctree.get_collection(collection_name)?;
// Get the page content
let content = collection.page_get(page_name)?;
Ok(content)
}
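
End to end, each `!!include` line is replaced by the referenced page's markdown (recursively processed), and failures degrade to inline `>>ERROR:` lines instead of aborting the render. A sketch with hypothetical collections `guide` and `common`:

```rust
use doctree::{process_includes, DocTree, Result};

fn render(doctree: &DocTree) -> Result<String> {
    // A page in collection "guide" pulls a shared snippet from "common"
    let page = "# Install\n\n!!include common:'prereqs'\n\nRun the installer.";
    // The !!include line is replaced by the included page's markdown,
    // which is itself scanned for nested includes
    process_includes(page, "guide", doctree)
}
```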

40 doctree/src/lib.rs Normal file

@@ -0,0 +1,40 @@
//! DocTree is a library for managing collections of markdown documents.
//!
//! It provides functionality for scanning directories, managing collections,
//! and processing includes between documents.
// Import lazy_static for global state
extern crate lazy_static;
mod error;
mod storage;
mod utils;
mod collection;
mod doctree;
mod include;
pub use error::{DocTreeError, Result};
pub use storage::RedisStorage;
pub use collection::{Collection, CollectionBuilder};
pub use doctree::{DocTree, DocTreeBuilder, new, from_directory};
pub use include::process_includes;
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_doctree_builder() {
// Create a storage instance (requires a running Redis at localhost:6379)
let storage = RedisStorage::new("redis://localhost:6379").unwrap();
let doctree = DocTree::builder()
.with_storage(storage)
.build()
.unwrap();
assert_eq!(doctree.collections.len(), 0);
assert_eq!(doctree.default_collection, None);
}
}

380 doctree/src/storage.rs Normal file

@@ -0,0 +1,380 @@
use redis::{Client, Connection};
use std::sync::{Arc, Mutex};
use crate::error::{DocTreeError, Result};
/// Storage backend for doctree
pub struct RedisStorage {
// Redis client
client: Client,
// Single shared connection, guarded by a mutex
connection: Arc<Mutex<Connection>>,
}
impl RedisStorage {
/// Create a new RedisStorage instance
///
/// # Arguments
///
/// * `url` - Redis connection URL (e.g., "redis://localhost:6379")
///
/// # Returns
///
/// A new RedisStorage instance or an error
pub fn new(url: &str) -> Result<Self> {
// Create a Redis client
let client = Client::open(url).map_err(|e| DocTreeError::RedisError(format!("Failed to connect to Redis: {}", e)))?;
// Get a connection
let connection = client.get_connection().map_err(|e| DocTreeError::RedisError(format!("Failed to get Redis connection: {}", e)))?;
Ok(Self {
client,
connection: Arc::new(Mutex::new(connection)),
})
}
/// Store a collection entry
///
/// # Arguments
///
/// * `collection` - Collection name
/// * `key` - Entry key
/// * `value` - Entry value
///
/// # Returns
///
/// Ok(()) on success or an error
pub fn store_collection_entry(&self, collection: &str, key: &str, value: &str) -> Result<()> {
let redis_key = format!("collections:{}", collection);
println!("DEBUG: Redis operation - HSET {} {} {}", redis_key, key, value);
// Lock the shared Redis connection
let mut conn = self.connection.lock().unwrap();
// Store the entry using HSET
redis::cmd("HSET")
.arg(&redis_key)
.arg(key)
.arg(value)
.execute(&mut *conn);
println!("DEBUG: Stored entry in Redis - collection: '{}', key: '{}', value: '{}'",
collection, key, value);
Ok(())
}
/// Get a collection entry
///
/// # Arguments
///
/// * `collection` - Collection name
/// * `key` - Entry key
///
/// # Returns
///
/// The entry value or an error
pub fn get_collection_entry(&self, collection: &str, key: &str) -> Result<String> {
let collection_key = format!("collections:{}", collection);
println!("DEBUG: Redis operation - HGET {} {}", collection_key, key);
// Lock the shared Redis connection
let mut conn = self.connection.lock().unwrap();
// Get the entry using HGET
let result: Option<String> = redis::cmd("HGET")
.arg(&collection_key)
.arg(key)
.query(&mut *conn)
.map_err(|e| DocTreeError::RedisError(format!("Redis error: {}", e)))?;
// Check if the entry exists
match result {
Some(value) => {
println!("DEBUG: Retrieved entry from Redis - collection: '{}', key: '{}', value: '{}'",
collection, key, value);
Ok(value)
},
None => {
println!("DEBUG: Entry not found in Redis - collection: '{}', key: '{}'",
collection, key);
Err(DocTreeError::FileNotFound(key.to_string()))
}
}
}
/// Delete a collection entry
///
/// # Arguments
///
/// * `collection` - Collection name
/// * `key` - Entry key
///
/// # Returns
///
/// Ok(()) on success or an error
pub fn delete_collection_entry(&self, collection: &str, key: &str) -> Result<()> {
let collection_key = format!("collections:{}", collection);
println!("DEBUG: Redis operation - HDEL {} {}", collection_key, key);
// Lock the shared Redis connection
let mut conn = self.connection.lock().unwrap();
// Delete the entry using HDEL
let exists: bool = redis::cmd("HEXISTS")
.arg(&collection_key)
.arg(key)
.query(&mut *conn)
.map_err(|e| DocTreeError::RedisError(format!("Redis error: {}", e)))?;
if !exists {
return Err(DocTreeError::CollectionNotFound(collection.to_string()));
}
redis::cmd("HDEL")
.arg(&collection_key)
.arg(key)
.execute(&mut *conn);
println!("DEBUG: Deleted entry from Redis - collection: '{}', key: '{}'",
collection, key);
Ok(())
}
/// List all entries in a collection
///
/// # Arguments
///
/// * `collection` - Collection name
///
/// # Returns
///
/// A vector of entry keys or an error
pub fn list_collection_entries(&self, collection: &str) -> Result<Vec<String>> {
let collection_key = format!("collections:{}", collection);
println!("DEBUG: Redis operation - HKEYS {}", collection_key);
// Lock the shared Redis connection
let mut conn = self.connection.lock().unwrap();
// Check if the collection exists
let exists: bool = redis::cmd("EXISTS")
.arg(&collection_key)
.query(&mut *conn)
.map_err(|e| DocTreeError::RedisError(format!("Redis error: {}", e)))?;
if !exists {
return Err(DocTreeError::CollectionNotFound(collection.to_string()));
}
// Get all keys using HKEYS
let keys: Vec<String> = redis::cmd("HKEYS")
.arg(&collection_key)
.query(&mut *conn)
.map_err(|e| DocTreeError::RedisError(format!("Redis error: {}", e)))?;
println!("DEBUG: Listed {} entries from Redis - collection: '{}'",
keys.len(), collection);
Ok(keys)
}
/// Delete a collection
///
/// # Arguments
///
/// * `collection` - Collection name
///
/// # Returns
///
/// Ok(()) on success or an error
pub fn delete_collection(&self, collection: &str) -> Result<()> {
let redis_key = format!("collections:{}", collection);
println!("DEBUG: Redis operation - DEL {}", redis_key);
// Lock the shared Redis connection
let mut conn = self.connection.lock().unwrap();
// Delete the collection using DEL
redis::cmd("DEL")
.arg(&redis_key)
.execute(&mut *conn);
println!("DEBUG: Deleted collection from Redis - collection: '{}'", collection);
Ok(())
}
/// Check if a collection exists
///
/// # Arguments
///
/// * `collection` - Collection name
///
/// # Returns
///
/// true if the collection exists, false otherwise
pub fn collection_exists(&self, collection: &str) -> Result<bool> {
let collection_key = format!("collections:{}", collection);
println!("DEBUG: Redis operation - EXISTS {}", collection_key);
// Lock the shared Redis connection
let mut conn = self.connection.lock().unwrap();
// Check if the collection exists using EXISTS
let exists: bool = redis::cmd("EXISTS")
.arg(&collection_key)
.query(&mut *conn)
.map_err(|e| DocTreeError::RedisError(format!("Redis error: {}", e)))?;
println!("DEBUG: Collection exists check - collection: '{}', exists: {}",
collection, exists);
Ok(exists)
}
/// List all collections in Redis
///
/// # Returns
///
/// A vector of collection names or an error
pub fn list_all_collections(&self) -> Result<Vec<String>> {
println!("DEBUG: Redis operation - KEYS collections:*");
// Lock the shared Redis connection
let mut conn = self.connection.lock().unwrap();
// Get all collection keys
let keys: Vec<String> = redis::cmd("KEYS")
.arg("collections:*")
.query(&mut *conn)
.map_err(|e| DocTreeError::RedisError(format!("Redis error: {}", e)))?;
// Extract collection names from keys (strip the "collections:" prefix)
// and skip auxiliary "collections:<name>:path" keys
let collections: Vec<String> = keys.iter()
.filter_map(|key| {
key.strip_prefix("collections:")
.filter(|name| !name.contains(':'))
.map(|name| name.to_string())
})
.collect();
println!("DEBUG: Found {} collections in Redis", collections.len());
Ok(collections)
}
/// Delete all collections from Redis
///
/// # Returns
///
/// Ok(()) on success or an error
pub fn delete_all_collections(&self) -> Result<()> {
println!("DEBUG: Redis operation - KEYS collections:*");
// Lock the shared Redis connection
let mut conn = self.connection.lock().unwrap();
// Get all collection keys
let keys: Vec<String> = redis::cmd("KEYS")
.arg("collections:*")
.query(&mut *conn)
.map_err(|e| DocTreeError::RedisError(format!("Redis error: {}", e)))?;
println!("DEBUG: Found {} collections in Redis", keys.len());
// Delete each collection
for key in keys {
println!("DEBUG: Redis operation - DEL {}", key);
redis::cmd("DEL")
.arg(&key)
.execute(&mut *conn);
println!("DEBUG: Deleted collection from Redis - key: '{}'", key);
}
Ok(())
}
/// Store a collection's path
///
/// # Arguments
///
/// * `collection` - Collection name
/// * `path` - Collection path
///
/// # Returns
///
/// Ok(()) on success or an error
pub fn store_collection_path(&self, collection: &str, path: &str) -> Result<()> {
let redis_key = format!("collections:{}:path", collection);
println!("DEBUG: Redis operation - SET {} {}", redis_key, path);
// Lock the shared Redis connection
let mut conn = self.connection.lock().unwrap();
// Store the path using SET
redis::cmd("SET")
.arg(&redis_key)
.arg(path)
.execute(&mut *conn);
println!("DEBUG: Stored collection path in Redis - collection: '{}', path: '{}'",
collection, path);
Ok(())
}
/// Get a collection's path
///
/// # Arguments
///
/// * `collection` - Collection name
///
/// # Returns
///
/// The collection path or an error
pub fn get_collection_path(&self, collection: &str) -> Result<String> {
let redis_key = format!("collections:{}:path", collection);
println!("DEBUG: Redis operation - GET {}", redis_key);
// Lock the shared Redis connection
let mut conn = self.connection.lock().unwrap();
// Get the path using GET
let result: Option<String> = redis::cmd("GET")
.arg(&redis_key)
.query(&mut *conn)
.map_err(|e| DocTreeError::RedisError(format!("Redis error: {}", e)))?;
// Check if the path exists
match result {
Some(path) => {
println!("DEBUG: Retrieved collection path from Redis - collection: '{}', path: '{}'",
collection, path);
Ok(path)
},
None => {
println!("DEBUG: Collection path not found in Redis - collection: '{}'",
collection);
Err(DocTreeError::CollectionNotFound(collection.to_string()))
}
}
}
}
// Implement Clone for RedisStorage
impl Clone for RedisStorage {
fn clone(&self) -> Self {
// Create a new connection
let connection = self.client.get_connection()
.expect("Failed to get Redis connection");
Self {
client: self.client.clone(),
connection: Arc::new(Mutex::new(connection)),
}
}
}
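
The key schema is: one hash `collections:<name>` mapping name-fixed filenames to relative paths, plus one string `collections:<name>:path` holding the source directory. A sketch of the resulting Redis traffic (collection name and paths hypothetical):

```rust
use doctree::{RedisStorage, Result};

fn demo() -> Result<()> {
    let storage = RedisStorage::new("redis://localhost:6379")?;
    // SET collections:mydocs:path /srv/docs/mydocs
    storage.store_collection_path("mydocs", "/srv/docs/mydocs")?;
    // HSET collections:mydocs intro.md tutorials/Intro.md
    storage.store_collection_entry("mydocs", "intro.md", "tutorials/Intro.md")?;
    // HGET collections:mydocs intro.md  ->  "tutorials/Intro.md"
    let rel = storage.get_collection_entry("mydocs", "intro.md")?;
    assert_eq!(rel, "tutorials/Intro.md");
    Ok(())
}
```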

79 doctree/src/utils.rs Normal file

@@ -0,0 +1,79 @@
use pulldown_cmark::{Parser, Options, html};
use sal::text;
/// Fix a name to be used as a key
///
/// This is equivalent to the tools.NameFix function in the Go implementation.
/// It normalizes the name by converting to lowercase and replacing whitespace and special characters with underscores.
///
/// # Arguments
///
/// * `text` - The name to fix
///
/// # Returns
///
/// The fixed name
pub fn name_fix(text: &str) -> String {
// Use the name_fix function from the SAL library
text::name_fix(text)
}
/// Convert markdown to HTML
///
/// # Arguments
///
/// * `markdown` - The markdown content to convert
///
/// # Returns
///
/// The HTML content
pub fn markdown_to_html(markdown: &str) -> String {
let mut options = Options::empty();
options.insert(Options::ENABLE_TABLES);
options.insert(Options::ENABLE_FOOTNOTES);
options.insert(Options::ENABLE_STRIKETHROUGH);
let parser = Parser::new_ext(markdown, options);
let mut html_output = String::new();
html::push_html(&mut html_output, parser);
html_output
}
/// Trim spaces and quotes from a string
///
/// # Arguments
///
/// * `s` - The string to trim
///
/// # Returns
///
/// The trimmed string
pub fn trim_spaces_and_quotes(s: &str) -> String {
let mut result = s.trim().to_string();
// Remove surrounding quotes
if (result.starts_with('\'') && result.ends_with('\'')) ||
(result.starts_with('"') && result.ends_with('"')) {
result = result[1..result.len()-1].to_string();
}
result
}
/// Ensure a string has a .md extension
///
/// # Arguments
///
/// * `name` - The name to check
///
/// # Returns
///
/// The name with a .md extension
pub fn ensure_md_extension(name: &str) -> String {
if !name.ends_with(".md") {
format!("{}.md", name)
} else {
name.to_string()
}
}
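
On every lookup these helpers compose in a fixed order: `name_fix` first, then `ensure_md_extension`. A test sketch (e.g., appended to utils.rs); the exact `name_fix` outputs shown assume the rules sketched in the implementation plan later in this diff, since the real implementation is delegated to `sal::text`:

```rust
#[cfg(test)]
mod name_tests {
    use crate::utils::{ensure_md_extension, name_fix, trim_spaces_and_quotes};

    #[test]
    fn normalizes_names() {
        // Whitespace and punctuation collapse to single underscores, lowercased
        // (expected values assume the name_fix rules from the plan)
        assert_eq!(name_fix("My Page!"), "my_page_");
        assert_eq!(ensure_md_extension("intro"), "intro.md");
        assert_eq!(ensure_md_extension("intro.md"), "intro.md");
        assert_eq!(trim_spaces_and_quotes("  'readme'  "), "readme");
    }
}
```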

@@ -0,0 +1,258 @@
# Implementation Plan: DocTree Collection Scanner
## Overview
We need to expand the doctree library to:
1. Add a recursive scan function to the DocTree struct
2. Detect directories containing `.collection` files
3. Parse `.collection` files as TOML to extract collection names
4. Replace the current `name_fix` function with the one from the sal library
5. Populate collections with all files found under the collection directories
## Detailed Implementation Plan
### 1. Update Dependencies
First, we need to add the necessary dependencies to the Cargo.toml file. Note that `CollectionConfig` in step 3 derives `Deserialize`, so `serde` (with the `derive` feature) is needed alongside `toml`, as the final Cargo.toml above already includes:
```toml
[dependencies]
walkdir = "2.3.3"
pulldown-cmark = "0.9.3"
thiserror = "1.0.40"
lazy_static = "1.4.0"
toml = "0.7.3" # Add TOML parsing support
```
### 2. Replace the name_fix Function
Replace the current `name_fix` function in `utils.rs` with the one from the sal library:
```rust
pub fn name_fix(text: &str) -> String {
let mut result = String::with_capacity(text.len());
let mut last_was_underscore = false;
for c in text.chars() {
// Keep only ASCII characters
if c.is_ascii() {
// Replace specific characters with underscore
if c.is_whitespace() || c == ',' || c == '-' || c == '"' || c == '\'' ||
c == '#' || c == '!' || c == '(' || c == ')' || c == '[' || c == ']' ||
c == '=' || c == '+' || c == '<' || c == '>' || c == '@' || c == '$' ||
c == '%' || c == '^' || c == '&' || c == '*' {
// Only add underscore if the last character wasn't an underscore
if !last_was_underscore {
result.push('_');
last_was_underscore = true;
}
} else {
// Add the character as is (will be converted to lowercase later)
result.push(c);
last_was_underscore = false;
}
}
// Non-ASCII characters are simply skipped
}
// Convert to lowercase
return result.to_lowercase();
}
```
### 3. Add Collection Configuration Struct
Create a new struct to represent the configuration found in `.collection` files:
```rust
#[derive(Deserialize, Default)]
struct CollectionConfig {
name: Option<String>,
// Add other configuration options as needed
}
```
### 4. Add Scan Collections Method to DocTree
Add a new method to the DocTree struct to recursively scan directories for `.collection` files:
```rust
impl DocTree {
/// Recursively scan directories for .collection files and add them as collections
///
/// # Arguments
///
/// * `root_path` - The root path to start scanning from
///
/// # Returns
///
/// Ok(()) on success or an error
pub fn scan_collections<P: AsRef<Path>>(&mut self, root_path: P) -> Result<()> {
let root_path = root_path.as_ref();
// Walk through the directory tree
for entry in WalkDir::new(root_path).follow_links(true) {
let entry = match entry {
Ok(entry) => entry,
Err(e) => {
eprintln!("Error walking directory: {}", e);
continue;
}
};
// Skip non-directories
if !entry.file_type().is_dir() {
continue;
}
// Check if this directory contains a .collection file
let collection_file_path = entry.path().join(".collection");
if collection_file_path.exists() {
// Found a collection directory
let dir_path = entry.path();
// Get the directory name as a fallback collection name
let dir_name = dir_path.file_name()
.and_then(|name| name.to_str())
.unwrap_or("unnamed");
// Try to read and parse the .collection file
let collection_name = match fs::read_to_string(&collection_file_path) {
Ok(content) => {
// Parse as TOML
match toml::from_str::<CollectionConfig>(&content) {
Ok(config) => {
// Use the name from config if available, otherwise use directory name
config.name.unwrap_or_else(|| dir_name.to_string())
},
Err(e) => {
eprintln!("Error parsing .collection file at {:?}: {}", collection_file_path, e);
dir_name.to_string()
}
}
},
Err(e) => {
eprintln!("Error reading .collection file at {:?}: {}", collection_file_path, e);
dir_name.to_string()
}
};
// Add the collection to the DocTree
match self.add_collection(dir_path, &collection_name) {
Ok(_) => {
println!("Added collection '{}' from {:?}", collection_name, dir_path);
},
Err(e) => {
eprintln!("Error adding collection '{}' from {:?}: {}", collection_name, dir_path, e);
}
}
}
}
Ok(())
}
}
```
### 5. Update the DocTreeBuilder
Update the DocTreeBuilder to include a method for scanning collections:
```rust
impl DocTreeBuilder {
/// Scan for collections in the given root path
///
/// # Arguments
///
/// * `root_path` - The root path to scan for collections
///
/// # Returns
///
/// Self for method chaining or an error
pub fn scan_collections<P: AsRef<Path>>(self, root_path: P) -> Result<Self> {
// Ensure storage is set
let storage = self.storage.as_ref().ok_or_else(|| {
DocTreeError::MissingParameter("storage".to_string())
})?;
// Create a temporary DocTree to scan collections
let mut temp_doctree = DocTree {
collections: HashMap::new(),
default_collection: None,
storage: storage.clone(),
name: self.name.clone().unwrap_or_default(),
path: self.path.clone().unwrap_or_else(|| PathBuf::from("")),
};
// Scan for collections
temp_doctree.scan_collections(root_path)?;
// Create a new builder with the scanned collections
let mut new_builder = self;
for (name, collection) in temp_doctree.collections {
new_builder.collections.insert(name, collection);
}
Ok(new_builder)
}
}
```
### 6. Add a Convenience Function to the Library
Add a convenience function to the library for creating a DocTree by scanning a directory:
```rust
/// Create a new DocTree by scanning a directory for collections
///
/// # Arguments
///
/// * `root_path` - The root path to scan for collections
///
/// # Returns
///
/// A new DocTree or an error
pub fn from_directory<P: AsRef<Path>>(root_path: P) -> Result<DocTree> {
let storage = RedisStorage::new("redis://localhost:6379")?;
DocTree::builder()
.with_storage(storage)
.scan_collections(root_path)?
.build()
}
```
## Implementation Flow Diagram
```mermaid
flowchart TD
A[Start] --> B[Update Dependencies]
B --> C[Replace name_fix function]
C --> D[Add CollectionConfig struct]
D --> E[Add scan_collections method to DocTree]
E --> F[Update DocTreeBuilder]
F --> G[Add convenience function]
G --> H[End]
```
## Component Interaction Diagram
```mermaid
graph TD
A[DocTree] -->|manages| B[Collections]
C[scan_collections] -->|finds| D[.collection files]
D -->|parsed as| E[TOML]
E -->|extracts| F[Collection Name]
C -->|creates| B
G[name_fix] -->|processes| F
G -->|processes| H[File Names]
B -->|contains| H
```
## Testing Plan
1. Create test directories with `.collection` files in various formats
2. Test the scan_collections method with these directories (see the sketch after this list)
3. Verify that collections are created correctly with the expected names
4. Verify that all files under the collection directories are included in the collections
5. Test edge cases such as empty `.collection` files, invalid TOML, etc.
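
A minimal integration-test sketch for step 2 (hypothetical names; assumes a running Redis at redis://localhost:6379 and writes under the system temp directory):

```rust
// tests/scan_collections.rs (hypothetical integration test)
use std::fs;

#[test]
fn scans_named_collection() {
    // Layout: <tmp>/guides/.collection containing `name = "user_guides"`
    let root = std::env::temp_dir().join("doctree_scan_test");
    let dir = root.join("guides");
    fs::create_dir_all(&dir).unwrap();
    fs::write(dir.join(".collection"), "name = \"user_guides\"").unwrap();
    fs::write(dir.join("intro.md"), "# Intro").unwrap();

    let doctree = doctree::from_directory(&root).unwrap();
    assert!(doctree.list_collections().contains(&"user_guides".to_string()));
}
```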

8 doctreecmd/Cargo.toml Normal file

@@ -0,0 +1,8 @@
[package]
name = "doctreecmd"
version = "0.1.0"
edition = "2024"
[dependencies]
doctree = { path = "../doctree" }
clap = "3.2.25"

357 doctreecmd/src/main.rs Normal file

@@ -0,0 +1,357 @@
use clap::{App, Arg, SubCommand};
use doctree::{DocTree, RedisStorage, Result, from_directory};
use std::path::Path;
fn main() -> Result<()> {
let matches = App::new("DocTree CLI")
.version("0.1.0")
.author("Your Name")
.about("A tool to manage document collections")
.subcommand(
SubCommand::with_name("scan")
.about("Scan a directory and create a collection")
.arg(Arg::with_name("path").required(true).help("Path to the directory"))
.arg(Arg::with_name("name").required(true).help("Name of the collection")),
)
.subcommand(
SubCommand::with_name("list")
.about("List collections"),
)
.subcommand(
SubCommand::with_name("scan-collections")
.about("Recursively scan directories for .collection files")
.arg(Arg::with_name("path").required(true).help("Root path to scan for collections")),
)
.subcommand(
SubCommand::with_name("scan-and-info")
.about("Scan collections and show detailed information")
.arg(Arg::with_name("path").required(true).help("Root path to scan for collections"))
.arg(Arg::with_name("collection").help("Name of the collection (optional)")),
)
.subcommand(
SubCommand::with_name("info")
.about("Show detailed information about collections")
.arg(Arg::with_name("collection").help("Name of the collection (optional)")),
)
.subcommand(
SubCommand::with_name("get")
.about("Get page content")
.arg(Arg::with_name("collection")
.short('c')
.long("collection")
.takes_value(true)
.help("Name of the collection (optional)"))
.arg(Arg::with_name("page")
.short('p')
.long("page")
.required(true)
.takes_value(true)
.help("Name of the page"))
.arg(Arg::with_name("format")
.short('f')
.long("format")
.takes_value(true)
.help("Output format (html or markdown, default: markdown)")),
)
.subcommand(
SubCommand::with_name("html")
.about("Get page content as HTML")
.arg(Arg::with_name("collection").required(true).help("Name of the collection"))
.arg(Arg::with_name("page").required(true).help("Name of the page")),
)
.subcommand(
SubCommand::with_name("delete-collection")
.about("Delete a collection from Redis")
.arg(Arg::with_name("collection").required(true).help("Name of the collection")),
)
.subcommand(
SubCommand::with_name("reset")
.about("Delete all collections from Redis"),
)
.get_matches();
// Create a Redis storage instance
let storage = RedisStorage::new("redis://localhost:6379")?;
// Create a DocTree instance
let mut doctree = DocTree::builder()
.with_storage(storage)
.build()?;
// Handle subcommands
if let Some(matches) = matches.subcommand_matches("scan") {
let path = matches.value_of("path").unwrap();
let name = matches.value_of("name").unwrap();
println!("Scanning directory: {}", path);
doctree.add_collection(Path::new(path), name)?;
println!("Collection '{}' created successfully", name);
} else if let Some(_) = matches.subcommand_matches("list") {
let collections = doctree.list_collections();
if collections.is_empty() {
println!("No collections found");
} else {
println!("Collections:");
for collection in collections {
println!("- {}", collection);
}
}
} else if let Some(matches) = matches.subcommand_matches("get") {
let collection = matches.value_of("collection");
let page = matches.value_of("page").unwrap();
let format = matches.value_of("format").unwrap_or("markdown");
if format.to_lowercase() == "html" {
let html = doctree.page_get_html(collection, page)?;
println!("{}", html);
} else {
let content = doctree.page_get(collection, page)?;
println!("{}", content);
}
} else if let Some(matches) = matches.subcommand_matches("html") {
let collection = matches.value_of("collection").unwrap();
let page = matches.value_of("page").unwrap();
let html = doctree.page_get_html(Some(collection), page)?;
println!("{}", html);
} else if let Some(matches) = matches.subcommand_matches("delete-collection") {
let collection = matches.value_of("collection").unwrap();
println!("Deleting collection '{}' from Redis...", collection);
doctree.delete_collection(collection)?;
println!("Collection '{}' deleted successfully", collection);
} else if let Some(_) = matches.subcommand_matches("reset") {
println!("Deleting all collections from Redis...");
doctree.delete_all_collections()?;
println!("All collections deleted successfully");
} else if let Some(matches) = matches.subcommand_matches("scan-collections") {
let path = matches.value_of("path").unwrap();
println!("Recursively scanning for collections in: {}", path);
// Use the from_directory function to create a DocTree with all collections
let doctree = from_directory(Path::new(path))?;
// Print the discovered collections
let collections = doctree.list_collections();
if collections.is_empty() {
println!("No collections found");
} else {
println!("Discovered collections:");
for collection in collections {
println!("- {}", collection);
}
}
} else if let Some(matches) = matches.subcommand_matches("scan-and-info") {
let path = matches.value_of("path").unwrap();
let collection_name = matches.value_of("collection");
println!("Recursively scanning for collections in: {}", path);
// Use the from_directory function to create a DocTree with all collections
let doctree = from_directory(Path::new(path))?;
// Print the discovered collections
let collections = doctree.list_collections();
if collections.is_empty() {
println!("No collections found");
return Ok(());
}
println!("Discovered collections:");
for collection in &collections {
println!("- {}", collection);
}
println!("\nDetailed Collection Information:");
if let Some(name) = collection_name {
// Show info for a specific collection
match doctree.get_collection(name) {
Ok(collection) => {
println!("Collection Information for '{}':", name);
println!(" Path: {:?}", collection.path);
println!(" Redis Key: collections:{}", collection.name);
// List documents
match collection.page_list() {
Ok(pages) => {
println!(" Documents ({}):", pages.len());
for page in pages {
match collection.page_get_path(&page) {
Ok(path) => {
println!(" - {} => Redis: collections:{} / {}", path, collection.name, page);
},
Err(_) => {
println!(" - {}", page);
}
}
}
},
Err(e) => println!(" Error listing documents: {}", e),
}
// List files
match collection.file_list() {
Ok(files) => {
// Filter images
let images: Vec<String> = files.iter()
.filter(|f|
f.ends_with(".png") || f.ends_with(".jpg") ||
f.ends_with(".jpeg") || f.ends_with(".gif") ||
f.ends_with(".svg"))
.cloned()
.collect();
println!(" Images ({}):", images.len());
for image in images {
println!(" - {} => Redis: collections:{} / {}", image, collection.name, image);
}
// Filter other files
let other_files: Vec<String> = files.iter()
.filter(|f|
!f.ends_with(".png") && !f.ends_with(".jpg") &&
!f.ends_with(".jpeg") && !f.ends_with(".gif") &&
!f.ends_with(".svg"))
.cloned()
.collect();
println!(" Other Files ({}):", other_files.len());
for file in other_files {
println!(" - {} => Redis: collections:{} / {}", file, collection.name, file);
}
},
Err(e) => println!(" Error listing files: {}", e),
}
},
Err(e) => println!("Error: {}", e),
}
} else {
// Show info for all collections
for name in collections {
if let Ok(collection) = doctree.get_collection(&name) {
println!("- {} (Redis Key: collections:{})", name, collection.name);
println!(" Path: {:?}", collection.path);
// Count documents and images
if let Ok(pages) = collection.page_list() {
println!(" Documents: {}", pages.len());
}
if let Ok(files) = collection.file_list() {
let image_count = files.iter()
.filter(|f|
f.ends_with(".png") || f.ends_with(".jpg") ||
f.ends_with(".jpeg") || f.ends_with(".gif") ||
f.ends_with(".svg"))
.count();
println!(" Images: {}", image_count);
println!(" Other Files: {}", files.len() - image_count);
}
}
}
}
} else if let Some(matches) = matches.subcommand_matches("info") {
let collection_name = matches.value_of("collection");
if let Some(name) = collection_name {
// Show info for a specific collection
match doctree.get_collection(name) {
Ok(collection) => {
println!("Collection Information for '{}':", name);
println!(" Path: {:?}", collection.path);
println!(" Redis Key: collections:{}", collection.name);
// List documents
match collection.page_list() {
Ok(pages) => {
println!(" Documents ({}):", pages.len());
for page in pages {
match collection.page_get_path(&page) {
Ok(path) => {
println!(" - {} => Redis: collections:{} / {}", path, collection.name, page);
},
Err(_) => {
println!(" - {}", page);
}
}
}
},
Err(e) => println!(" Error listing documents: {}", e),
}
// List files
match collection.file_list() {
Ok(files) => {
// Filter images
let images: Vec<String> = files.iter()
.filter(|f|
f.ends_with(".png") || f.ends_with(".jpg") ||
f.ends_with(".jpeg") || f.ends_with(".gif") ||
f.ends_with(".svg"))
.cloned()
.collect();
println!(" Images ({}):", images.len());
for image in images {
println!(" - {} => Redis: collections:{} / {}", image, collection.name, image);
}
// Filter other files
let other_files: Vec<String> = files.iter()
.filter(|f|
!f.ends_with(".png") && !f.ends_with(".jpg") &&
!f.ends_with(".jpeg") && !f.ends_with(".gif") &&
!f.ends_with(".svg"))
.cloned()
.collect();
println!(" Other Files ({}):", other_files.len());
for file in other_files {
println!(" - {} => Redis: collections:{} / {}", file, collection.name, file);
}
},
Err(e) => println!(" Error listing files: {}", e),
}
},
Err(e) => println!("Error: {}", e),
}
} else {
// Show info for all collections
let collections = doctree.list_collections();
if collections.is_empty() {
println!("No collections found");
} else {
println!("Collections:");
for name in collections {
if let Ok(collection) = doctree.get_collection(&name) {
println!("- {} (Redis Key: collections:{})", name, collection.name);
println!(" Path: {:?}", collection.path);
// Count documents and images
if let Ok(pages) = collection.page_list() {
println!(" Documents: {}", pages.len());
}
if let Ok(files) = collection.file_list() {
let image_count = files.iter()
.filter(|f|
f.ends_with(".png") || f.ends_with(".jpg") ||
f.ends_with(".jpeg") || f.ends_with(".gif") ||
f.ends_with(".svg"))
.count();
println!(" Images: {}", image_count);
println!(" Other Files: {}", files.len() - image_count);
}
}
}
}
}
} else {
println!("No command specified. Use --help for usage information.");
}
Ok(())
}

31
example_commands.sh Executable file
View File

@ -0,0 +1,31 @@
#!/bin/bash
# Change to the directory where the script is located
cd "$(dirname "$0")"
# Exit immediately if a command exits with a non-zero status
set -e
cd doctreecmd
echo "=== Scanning Collections ==="
cargo run -- scan-collections ../examples
echo -e "\n=== Listing Collections ==="
cargo run -- list
echo -e "\n=== Getting Document (Markdown) ==="
cargo run -- get -c grid_documentation -p introduction.md
echo -e "\n=== Getting Document (HTML) ==="
cargo run -- get -c grid_documentation -p introduction.md -f html
echo -e "\n=== Deleting Collection ==="
cargo run -- delete-collection grid_documentation
echo -e "\n=== Listing Remaining Collections ==="
cargo run -- list
echo -e "\n=== Resetting All Collections ==="
cargo run -- reset
echo -e "\n=== Verifying Reset ==="
cargo run -- list

View File

@ -0,0 +1 @@
name = "Grid Documentation"

View File

@ -0,0 +1,8 @@
{
"label": "AIBox Benefits",
"position": 4,
"link": {
"type": "generated-index",
"description": "The benefits of AIBox"
}
}

View File

@ -0,0 +1,28 @@
---
title: Revenue Generation
sidebar_position: 2
---
### Renting Options
AIBox creates opportunities for revenue generation through resource sharing. The following numbers are suggestive, as each AIBox owner can set their own pricing.
| Plan | Rate | Monthly Potential | Usage Scenario |
|------|------|------------------|----------------|
| Micro | $0.40/hr | $200-300 | Inference workloads |
| Standard | $0.80/hr | $400-600 | Development |
| Full GPU | $1.60/hr | $800-1,200 | Training |
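As a rough sanity check, a fully utilized month at the Standard rate comes to 24 h × 30 days × $0.80/hr = $576, so the quoted ranges correspond to roughly 70-100% utilization.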
### Proof of Capacity Revenues
The AIBox implements a tiered proof-of-capacity reward system, distributing monthly INCA tokens based on hardware configuration:
| Configuration | Monthly Rewards |
|---------------|----------------|
| Base AIBox | 500-2000 INCA |
| 1 GPU AIBox | 1000 INCA |
| 2 GPU AIBox | 2000 INCA |
### Proof of Utilization Revenues
The AIBox implements a revenue-sharing model wherein device owners receive 80% of INCA tokens utilized for deployments, providing transparent proof of utilization economics.

View File

@ -0,0 +1,32 @@
---
title: Use Cases
sidebar_position: 3
---
### Personal AI Development
The AIBox provides an ideal environment for individual developers working on AI projects:
- Model training and fine-tuning
- Experimental AI architectures
- Unrestricted testing and development
- Complete control over computing resources
The system allows developers to run extended training sessions without watching cloud billing meters or dealing with usage restrictions.
### Shared Resources
For teams and organizations, AIBox offers efficient resource sharing capabilities:
- Multi-user environment
- Resource pooling
- Cost sharing
- Distributed computing
This makes it particularly valuable for small teams and startups looking to maintain control over their AI infrastructure while managing costs.
### Commercial Applications
The system supports various commercial deployments:
- AI-as-a-Service
- Model hosting
- Inference endpoints
- Dataset processing

View File

@ -0,0 +1,8 @@
{
"label": "Getting Started",
"position": 5,
"link": {
"type": "generated-index",
"description": "Getting started with the AIBox"
}
}

View File

@ -0,0 +1,10 @@
---
title: Pre-Order Process
sidebar_position: 2
---
### How to Order
The steps to acquire an AIBox are simple:
1. [Select your configuration](./purchase_options.md)
2. [Submit pre-order form](https://www2.aibox.threefold.io/signup/)

View File

@ -0,0 +1,84 @@
---
title: Purchase Options
sidebar_position: 1
---
### Base AIBox Plan ($1,000-1,500)
For experienced builders and hardware enthusiasts who want to customize their AI infrastructure. This plan provides the essential framework while allowing you to select and integrate your own GPU.
Base Configuration:
- GPU: Your choice, with minimum requirement of AMD Radeon RX 7900 XT
* Flexibility to use existing GPU or select preferred model
* Support for multiple GPU vendors with minimum performance requirements
* Full integration support for chosen hardware
- Memory: 64-128 GB DDR5
* Expandable configuration
* High-speed memory modules
* ECC support optional
- Storage: 2-4 TB of NVMe SSD
* PCIe 4.0 support
* Configurable RAID options
* Expansion capabilities
- Integrated Mycelium Network
* Full network stack
* P2P capabilities
* Decentralized computing support
Rewards Structure:
- Proof of Capacity: 500-2000 INCA per month (depending on chosen GPU)
- Proof of Utilization: 80% of INCA Revenue
- Flexible earning potential based on hardware configuration
### 1 GPU AIBox Plan ($2,000-2,500)
Perfect for individual developers and researchers who need professional-grade AI computing power. This configuration provides enough processing power for smaller yet capable models and AI agents.
Standard Configuration:
- 1x AMD Radeon RX 7900 XTX
* 24GB VRAM
* 61.6 TFLOPS FP32 Performance
* 960 GB/s Memory Bandwidth
- 64-128 GB DDR5 Memory
* Optimal for AI workloads
* High-speed data processing
* Multi-tasking capability
- 2-4 TB of NVMe SSD
* Ultra-fast storage access
* Ample space for datasets
* Quick model loading
- Integrated Mycelium
* Full network integration
* Ready for distributed computing
* P2P capabilities enabled
Rewards Structure:
- Proof of Capacity: 1000 INCA per month
- Proof of Utilization: 80% of INCA Revenue
- Consistent earning potential
### 2 GPU AIBox Plan ($4,000-5,000)
Our most powerful configuration, designed for serious AI researchers and organizations. This setup supports models requiring up to 48GB of VRAM, providing substantial computing power for advanced AI applications.
Advanced Configuration:
- 2x AMD Radeon RX 7900 XTX
* Combined 48GB VRAM
* 123.2 TFLOPS total FP32 Performance
* 1920 GB/s Total Memory Bandwidth
- 64-128 GB DDR5 Memory
* Maximum performance configuration
* Support for multiple large models
* Extensive multi-tasking capability
- 2-4 TB of NVMe SSD
* Enterprise-grade storage
* RAID configuration options
* Expandable capacity
- Integrated Mycelium
* Enhanced network capabilities
* Full distributed computing support
* Advanced P2P features
Rewards Structure:
- Proof of Capacity: 2000 INCA per month
- Proof of Utilization: 80% of INCA Revenue
- Maximum earning potential
Each plan includes comprehensive support, setup assistance, and access to the full AIBox ecosystem. Configurations can be further customized within each plan's framework to meet specific requirements.

View File

@ -0,0 +1,8 @@
---
title: Support
sidebar_position: 3
---
Our support team is composed of technically proficient members who understand AI development needs.
Feel free to reach out to ThreeFold Support [here](https://threefoldfaq.crisp.help/en/) for more information.

View File

@ -0,0 +1,24 @@
---
title: Introducing AIBox
sidebar_position: 1
slug: /
---
## AIBox: Powering Community-Driven AI
The AIBox is built for those who want to explore AI on their own terms. With 2 RX 7900 XTX GPUs and 48GB of memory, it enables running demanding AI models efficiently.
## Open AI Development
AIBox offers full control—no cloud restrictions, no unexpected costs. Train models, fine-tune AI systems, and experiment freely with PyTorch, TensorFlow, or low-level GPU programming.
## More Than Hardware: A Shared Network
AIBox isn't just a tool: it's part of a decentralized AI network. When idle, its GPU power can be shared via Mycelium, benefiting the wider community while generating value. Designed for efficiency, with water cooling and power monitoring, it's a practical, community-powered step toward open AI development.
## Expanding the ThreeFold Grid
Each AIBox integrates into the ThreeFold Grid, a decentralized Internet infrastructure active in over 50 countries. By connecting your AIBox, you contribute to this global network, enhancing its capacity and reach. This integration not only supports your AI endeavors but also strengthens a community-driven Internet ecosystem.
For more info about ThreeFold, see: https://www.threefold.io

View File

@ -0,0 +1,8 @@
{
"label": "AIBox Overview",
"position": 2,
"link": {
"type": "generated-index",
"description": "Overview of the AIBox"
}
}

View File

@ -0,0 +1,12 @@
---
title: Vision & Mission
sidebar_position: 2
---
## AI Landscape
The AI landscape today is dominated by centralized cloud providers, creating barriers for innovation and increasing costs for developers. Our vision is different: we're building tools for a decentralized AI future where computing power isn't monopolized by large cloud providers.
## High-End AI Hardware
Our technical goal is straightforward: provide enterprise-grade AI hardware that's both powerful and profitable through resource sharing. We believe that AI development should be accessible to anyone with the technical skills to push boundaries.

View File

@ -0,0 +1,27 @@
---
title: Who Is AIBox For?
sidebar_position: 4
---
The AIBox is for hackers and AI explorers who want a simple, accessible gateway into AI experimentation, while also offering advanced features for those ready to push the boundaries of what's possible.
### Developers & Hackers
Technical capabilities:
- Direct GPU programming through ROCm
- Custom containerization support
- Full Linux kernel access
- P2P networking capabilities
### AI Researchers
Research-focused features:
- Support for popular ML frameworks (PyTorch, TensorFlow)
- Large model training capability (up to 48GB VRAM)
- Distributed training support
- Dataset management tools
### Tech Enthusiasts
Advanced features:
- Water cooling management interface
- Power consumption monitoring
- Performance benchmarking tools
- Resource allocation controls

View File

@ -0,0 +1,18 @@
---
title: Why Decentralized AI Matters
sidebar_position: 3
---
The AIBox gives you complete control over your data privacy with full hardware access while enabling unlimited experimentation without the restrictions of cloud platforms.
### Data Privacy & Control
- Full root access to hardware
- No data leaving your premises without explicit permission
- Custom firewall rules and network configurations
- Ability to air-gap when needed
### Unlimited Experimentation
- Direct GPU access without virtualization overhead
- Custom model training without cloud restrictions
- Unrestricted model sizes and training durations
- Freedom to modify system parameters

View File

@ -0,0 +1,8 @@
{
"label": "Technical Specs",
"position": 3,
"link": {
"type": "generated-index",
"description": "Technical aspects of the AIBox"
}
}

View File

@ -0,0 +1,35 @@
---
title: Features & Capabilities
sidebar_position: 3
---
## Overview
AIBox combines enterprise-grade hardware capabilities with flexible resource management, creating a powerful platform for AI development and deployment. Each feature is designed to meet the demanding needs of developers and researchers who require both raw computing power and precise control over their resources.
## VM Management (CloudSlices)
CloudSlices transforms your AIBox into a multi-tenant powerhouse, enabling you to run multiple isolated environments simultaneously. Unlike traditional virtualization, CloudSlices is optimized for AI workloads, ensuring minimal overhead and maximum GPU utilization.
Each slice operates as a fully isolated virtual machine with guaranteed resources. The AIBox can be sliced into up to 8 virtual machines.
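At the stated 64-128 GB of RAM and 2-4 TB of NVMe storage, an even split across all 8 slices works out to roughly 8-16 GB of memory and 250-500 GB of storage per slice (a back-of-the-envelope figure, not a fixed allocation).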
The slicing system ensures resources are allocated efficiently while maintaining performance isolation between workloads. This means your critical training job won't be affected by other tasks running on the system.
## GPU Resource Management
Our GPU management system provides granular control while maintaining peak performance. Whether you're running a single large model or multiple smaller workloads, the system optimizes resource allocation automatically.
## Network Connectivity
The networking stack is built for both performance and security: it integrates seamlessly with the Mycelium network, providing end-to-end encryption, and with Web gateways, allowing external connections to VM containers. The AIBox thus creates a robust foundation for distributed AI computing.
## Security Features
Security is implemented at every layer of the system without compromising performance:
System Security:
- Hardware-level isolation
- Secure boot chain
- Network segmentation
Each feature has been carefully selected and implemented to provide both practical utility and enterprise-grade security, ensuring your AI workloads and data remain protected while maintaining full accessibility for authorized users.

View File

@ -0,0 +1,37 @@
---
title: Hardware Specifications
sidebar_position: 1
---
### GPU Options
At the heart of AIBox lies its GPU configuration, carefully selected for AI workloads. The AMD Radeon RX 7900 XTX provides an exceptional balance of performance, memory, and cost efficiency:
| Model | VRAM | FP32 Performance | Memory Bandwidth |
|-------|------|------------------|------------------|
| RX 7900 XTX | 24GB | 61.6 TFLOPS | 960 GB/s |
| Dual Config | 48GB | 123.2 TFLOPS | 1920 GB/s |
The dual GPU configuration enables handling larger models and datasets that wouldn't fit in single-GPU memory, making it ideal for advanced AI research and development.
### Memory & Storage
AI workloads demand high-speed memory and storage. The AIBox configuration ensures your GPU computing power isn't bottlenecked by I/O limitations:
Memory Configuration:
- RAM: 64GB/128GB DDR5-4800
- Storage: 2x 2TB NVMe SSDs (PCIe 4.0)
This setup provides ample memory for large dataset preprocessing and fast storage access for model training and inference.
### Cooling System
Thermal management is crucial for sustained AI workloads. Our cooling solution focuses on maintaining consistent performance during extended operations.
This cooling system allows for sustained maximum performance without thermal throttling, even during extended training sessions.
### Power Supply
Reliable power delivery is essential for system stability and performance.
The AIBox power configuration ensures clean, stable power delivery under all operating conditions, with headroom for additional components or intense workloads.

View File

@ -0,0 +1,29 @@
---
title: Software Stack
sidebar_position: 2
---
### ThreeFold Zero-OS
Zero-OS forms the foundation of AIBox's software architecture. Unlike traditional operating systems, it's a minimalist, security-focused platform optimized specifically for AI workloads and distributed computing.
Key features:
- Bare metal operating system with minimal overhead
- Zero overhead virtualization
- Secure boot process
- Automated resource management
This specialized operating system ensures maximum performance and security while eliminating unnecessary services and potential vulnerabilities.
### Mycelium Network Integration
The Mycelium Network integration transforms your AIBox from a standalone system into a node in a powerful distributed computing network, built on peer-to-peer, end-to-end encrypted communication that always chooses the shortest path.
### Pre-installed AI Frameworks
Your AIBox comes ready for development with a comprehensive AI software stack:
- ROCm 5.7+ ML stack
- PyTorch 2.1+ with GPU optimization
- TensorFlow 2.14+
- Pre-built container images

View File

@ -0,0 +1 @@
name = "supercollection"

View File

@ -0,0 +1,38 @@
---
title: Features Mycelium Network
sidebar_position: 1
---
Mycelium is a locality-aware, end-to-end encrypted network designed for efficient and secure communication between nodes. Below are its key features:
## What Makes Mycelium Unique
1. **Locality Awareness**
Mycelium identifies the shortest path between nodes, optimizing communication based on location.
2. **End-to-End Encryption**
All traffic between nodes is encrypted, ensuring secure data transmission.
3. **Traffic Routing Over Friend Nodes**
Traffic can be routed through nodes of trusted friends, maintaining location awareness.
4. **Automatic Rerouting**
If a physical link fails, Mycelium automatically reroutes traffic to ensure uninterrupted connectivity.
5. **Network Address Linked to Private Key**
Each node is assigned an IPv6 network address that is cryptographically linked to its private key.
6. **Scalability**
Mycelium is designed to scale to a planetary level. The team has evaluated multiple overlay networks in the past and is focused on overcoming scalability challenges.
## Tech
1. **Flexible Deployment**
Mycelium can be run without a TUN interface, allowing it to function solely as a reliable message bus.
2. **Reliable Message Bus**
Mycelium includes a simple and reliable message bus built on top of its network layer.
3. **Multiple Communication Protocols**
Mycelium supports various communication methods, including QUIC and TCP. The team is also developing hole-punching for QUIC, enabling direct peer-to-peer (P2P) traffic without intermediaries.

View File

@ -0,0 +1,23 @@
---
title: Download the App
sidebar_position: 4
---
The Mycelium app is available for Android, Windows, macOS and iOS.
For Linux, read the [Linux Installation](../experts/03_linux-installation.md) section.
## Download Links
You can download the Mycelium app with the following links:
- [iOS and macOS](https://apps.apple.com/app/id6504277565)
- Download the app from the App Store
- [Android](https://play.google.com/store/apps/details?id=tech.threefold.mycelium)
- Download the app from the Google Play Store
- [Windows](https://github.com/threefoldtech/myceliumflut/releases)
- Go to the official Mycelium release page and download the latest `.exe`
## Upcoming Updates
- The user interface (UI) will be drastically improved in upcoming releases to better represent the available features.

View File

@ -0,0 +1,48 @@
---
title: Use the App
sidebar_position: 5
---
## Start Mycelium
To start Mycelium, simply open the app and click on `Start`.
![](./img/mycelium_1.png)
> Note for Windows Users: The Mycelium app must be run as an administrator to function properly. Right-click on the application icon and select "Run as administrator" to ensure proper network connectivity.
## Stop or Restart Mycelium
To stop or restart Mycelium, click on the appropriate button.
![](./img/mycelium_2.png)
## Add Peers
You can add different Mycelium peers in the `Peers` window.
Simply add peers and then either start or restart the app.
![](./img/mycelium_3.png)
You can consult the [Mycelium hosted public nodes](../experts/04_additional-information.md) to find more peers.
For example, if you want to add the node with the IPv4 address `5.78.122.16` and TCP port `9651`, simply add the following line, then start or restart the app.
```
tcp://5.78.122.16:9651
```
## Mycelium Address
When you use the Mycelium app, you are assigned a unique Mycelium address.
To copy the Mycelium address, click on the button on the right of the address.
![](./img/mycelium_4.png)
## Deploy on the Grid with Mycelium
Once you've installed Mycelium, you can deploy on the ThreeFold Grid and connect to your workload using Mycelium.
As a starter, you can explore the ThreeFold Grid and deploy apps on the [ThreeFold Dashboard](https://manual.grid.tf/documentation/dashboard/dashboard.html) using Mycelium to connect.

View File

@ -0,0 +1,8 @@
{
"label": "Get Started",
"position": 4,
"link": {
"type": "generated-index",
"description": "Get started With Mycelium Network."
}
}

Binary file not shown.

After

Width: | Height: | Size: 35 KiB

Binary file not shown.

After

Width: | Height: | Size: 44 KiB

Binary file not shown.

After

Width: | Height: | Size: 14 KiB

Binary file not shown.

After

Width: | Height: | Size: 8.9 KiB
39
runexample.sh Executable file
View File

@ -0,0 +1,39 @@
#!/bin/bash
# Change to the directory where the script is located
cd "$(dirname "$0")"
# Exit immediately if a command exits with a non-zero status
set -e
cd doctreecmd
# First, scan the collections
echo "=== Scanning Collections ==="
cargo run -- scan-and-info ../examples supercollection
# Get a document in markdown format
echo -e "\n=== Getting Document (Markdown) ==="
cargo run -- get -c supercollection -p 01_features.md
# Get a document in HTML format
echo -e "\n=== Getting Document (HTML) ==="
cargo run -- get -c supercollection -p 01_features.md -f html
# Get a document without specifying collection
echo -e "\n=== Getting Document (Default Collection) ==="
cargo run -- get -p 01_features.md
# Delete a specific collection
echo -e "\n=== Deleting Collection ==="
cargo run -- delete-collection grid_documentation
# List remaining collections
echo -e "\n=== Listing Remaining Collections ==="
cargo run -- list
# Reset all collections
echo -e "\n=== Resetting All Collections ==="
cargo run -- reset
# Verify all collections are gone
echo -e "\n=== Verifying Reset ==="
cargo run -- list