Compare commits: 052cf2ecdb...8798bc202e (16 commits)

Commits (newest first; author and date columns were empty in this view):
8798bc202e
9fa9832605
4bb24b38dd
f3da14b957
5ea34b4445
d9a3b711d1
a1127b72da
3850df89be
45195d403e
f17b441ca1
ff4ea1d844
c9e1dcdb6c
56699b9abb
dd90a49615
9054737e84
09553f54c8
Cargo.lock (generated, 173 changed lines)

@@ -214,9 +214,15 @@ dependencies = [
 ]
 
 [[package]]
 name = "bitflags"
-version = "2.9.2"
+version = "1.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a65b545ab31d687cff52899d4890855fec459eb6afe0da6417b8a18da87aa29"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "bitflags"
+version = "2.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34efbcccd345379ca2868b2b2c9d3782e9cc58ba87bc7d79d5b53d9c9ae6f25d"
 
 [[package]]
 name = "block-buffer"

@@ -257,9 +263,9 @@ checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c"
 
 [[package]]
 name = "cfg-if"
-version = "1.0.1"
+version = "1.0.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268"
+checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9"
 
 [[package]]
 name = "chacha20"

@@ -396,6 +402,30 @@ dependencies = [
  "libc",
 ]
 
+[[package]]
+name = "crc32fast"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.9.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
+dependencies = [
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
+
 [[package]]
 name = "crypto-common"
 version = "0.1.6"

@@ -444,7 +474,7 @@ dependencies = [
  "hashbrown 0.14.5",
  "lock_api",
  "once_cell",
- "parking_lot_core",
+ "parking_lot_core 0.9.11",
 ]
 
 [[package]]

@@ -582,13 +612,23 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
 
 [[package]]
 name = "form_urlencoded"
-version = "1.2.1"
+version = "1.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456"
+checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf"
 dependencies = [
  "percent-encoding",
 ]
 
+[[package]]
+name = "fs2"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213"
+dependencies = [
+ "libc",
+ "winapi",
+]
+
 [[package]]
 name = "futures"
 version = "0.3.31"

@@ -684,6 +724,15 @@ dependencies = [
  "slab",
 ]
 
+[[package]]
+name = "fxhash"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c"
+dependencies = [
+ "byteorder",
+]
+
 [[package]]
 name = "generic-array"
 version = "0.14.7"

@@ -782,6 +831,7 @@ dependencies = [
  "serde",
  "serde_json",
  "sha2",
+ "sled",
  "thiserror 1.0.69",
  "tokio",
 ]

@@ -939,7 +989,7 @@ dependencies = [
  "intl-memoizer",
  "lazy_static",
  "log",
- "parking_lot",
+ "parking_lot 0.12.4",
  "rust-embed",
  "thiserror 1.0.69",
  "unic-langid",

@@ -1068,9 +1118,9 @@ dependencies = [
 
 [[package]]
 name = "idna"
-version = "1.0.3"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e"
+checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de"
 dependencies = [
  "idna_adapter",
  "smallvec",

@@ -1106,6 +1156,15 @@ dependencies = [
  "generic-array",
 ]
 
+[[package]]
+name = "instant"
+version = "0.1.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222"
+dependencies = [
+ "cfg-if",
+]
+
 [[package]]
 name = "intl-memoizer"
 version = "0.5.3"

@@ -1127,11 +1186,11 @@ dependencies = [
 
 [[package]]
 name = "io-uring"
-version = "0.7.9"
+version = "0.7.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4"
+checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b"
 dependencies = [
- "bitflags",
+ "bitflags 2.9.3",
  "cfg-if",
  "libc",
 ]

@@ -1229,7 +1288,7 @@ dependencies = [
  "http-body",
  "http-body-util",
  "jsonrpsee-types",
- "parking_lot",
+ "parking_lot 0.12.4",
  "pin-project",
  "rand 0.9.2",
  "rustc-hash 2.1.1",

@@ -1440,6 +1499,17 @@ version = "0.1.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
 
+[[package]]
+name = "parking_lot"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
+dependencies = [
+ "instant",
+ "lock_api",
+ "parking_lot_core 0.8.6",
+]
+
 [[package]]
 name = "parking_lot"
 version = "0.12.4"

@@ -1447,7 +1517,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13"
 dependencies = [
  "lock_api",
- "parking_lot_core",
+ "parking_lot_core 0.9.11",
 ]
 
+[[package]]
+name = "parking_lot_core"
+version = "0.8.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
+dependencies = [
+ "cfg-if",
+ "instant",
+ "libc",
+ "redox_syscall 0.2.16",
+ "smallvec",
+ "winapi",
+]
+
 [[package]]

@@ -1458,7 +1542,7 @@ checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5"
 dependencies = [
  "cfg-if",
  "libc",
- "redox_syscall",
+ "redox_syscall 0.5.17",
  "smallvec",
  "windows-targets 0.52.6",
 ]

@@ -1475,9 +1559,9 @@ dependencies = [
 
 [[package]]
 name = "percent-encoding"
-version = "2.3.1"
+version = "2.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
+checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
 
 [[package]]
 name = "pin-project"

@@ -1585,9 +1669,9 @@ dependencies = [
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.97"
+version = "1.0.101"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d61789d7719defeb74ea5fe81f2fdfdbd28a803847077cecce2ff14e1472f6f1"
+checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de"
 dependencies = [
  "unicode-ident",
 ]

@@ -1696,13 +1780,22 @@ dependencies = [
  "url",
 ]
 
+[[package]]
+name = "redox_syscall"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
+dependencies = [
+ "bitflags 1.3.2",
+]
+
 [[package]]
 name = "redox_syscall"
 version = "0.5.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77"
 dependencies = [
- "bitflags",
+ "bitflags 2.9.3",
 ]
 
 [[package]]

@@ -1851,9 +1944,9 @@ checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f"
 
 [[package]]
 name = "rustls-webpki"
-version = "0.103.4"
+version = "0.103.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc"
+checksum = "b5a37813727b78798e53c2bec3f5e8fe12a6d6f8389bf9ca7802add4c9905ad8"
 dependencies = [
  "ring",
  "rustls-pki-types",

@@ -1925,7 +2018,7 @@ version = "3.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "60b369d18893388b345804dc0007963c99b7d665ae71d275812d828c6f089640"
 dependencies = [
- "bitflags",
+ "bitflags 2.9.3",
  "core-foundation",
  "core-foundation-sys",
  "libc",

@@ -1985,9 +2078,9 @@ dependencies = [
 
 [[package]]
 name = "serde_json"
-version = "1.0.142"
+version = "1.0.143"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7"
+checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a"
 dependencies = [
  "itoa",
  "memchr",

@@ -2053,6 +2146,22 @@ version = "0.4.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589"
 
+[[package]]
+name = "sled"
+version = "0.34.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935"
+dependencies = [
+ "crc32fast",
+ "crossbeam-epoch",
+ "crossbeam-utils",
+ "fs2",
+ "fxhash",
+ "libc",
+ "log",
+ "parking_lot 0.11.2",
+]
+
 [[package]]
 name = "smallvec"
 version = "1.15.1"

@@ -2228,7 +2337,7 @@ dependencies = [
  "io-uring",
  "libc",
  "mio",
- "parking_lot",
+ "parking_lot 0.12.4",
  "pin-project-lite",
  "signal-hook-registry",
  "slab",

@@ -2431,9 +2540,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
 
 [[package]]
 name = "url"
-version = "2.5.4"
+version = "2.5.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60"
+checksum = "137a3c834eaf7139b73688502f3f1141a0337c5d8e4d9b536f9b8c796e26a7c4"
 dependencies = [
  "form_urlencoded",
  "idna",

@@ -2537,11 +2646,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
 
 [[package]]
 name = "winapi-util"
-version = "0.1.9"
+version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
+checksum = "0978bf7171b3d90bac376700cb56d606feb40f251a475a5d6634613564460b22"
 dependencies = [
- "windows-sys 0.59.0",
+ "windows-sys 0.60.2",
 ]
 
 [[package]]
Cargo.toml (37 changed lines)
@@ -1,9 +1,30 @@
-[workspace]
-members = ["herodb"]
-resolver = "2"
+[package]
+name = "herodb"
+version = "0.0.1"
+authors = ["ThreeFold Tech NV"]
+edition = "2024"
 
 # You can define shared profiles for all workspace members here
 [profile.release]
 lto = true
 codegen-units = 1
 strip = true
+
+[dependencies]
+anyhow = "1.0.59"
+bytes = "1.3.0"
+thiserror = "1.0.32"
+tokio = { version = "1.23.0", features = ["full"] }
+clap = { version = "4.5.20", features = ["derive"] }
+byteorder = "1.4.3"
+futures = "0.3"
+sled = "0.34"
+redb = "2.1.3"
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
+bincode = "1.3"
+chacha20poly1305 = "0.10.1"
+rand = "0.8"
+sha2 = "0.10"
+age = "0.10"
+secrecy = "0.8"
+ed25519-dalek = "2"
+base64 = "0.22"
+jsonrpsee = { version = "0.26.0", features = ["http-client", "ws-client", "server", "macros"] }
+
+[dev-dependencies]
+redis = { version = "0.24", features = ["aio", "tokio-comp"] }
README.md (128 changed lines)
@@ -1,91 +1,85 @@
 # HeroDB
 
-Redis-compatible database server with encryption and AGE cryptographic operations.
+HeroDB is a Redis-compatible database built with Rust, offering a flexible and secure storage solution. It supports two primary storage backends: `redb` (default) and `sled`, both with full encryption capabilities. HeroDB aims to provide a robust and performant key-value store with advanced features like data-at-rest encryption, hash operations, list operations, and cursor-based scanning.
+
+## Purpose
+
+The main purpose of HeroDB is to offer a lightweight, embeddable, and Redis-compatible database that prioritizes data security through transparent encryption. It's designed for applications that require fast, reliable data storage with the option for strong cryptographic protection, without the overhead of a full-fledged Redis server.
 
 ## Features
 
-- Redis protocol compatibility
-- String, hash, and list data types
-- Key expiration and persistence
-- Database encryption with ChaCha20-Poly1305
-- AGE encryption/decryption operations
-- Digital signatures with Ed25519
-- Persistent storage using redb
+- **Redis Compatibility**: Supports a subset of Redis commands over RESP (Redis Serialization Protocol) via TCP.
+- **Dual Backend Support**:
+  - `redb` (default): Optimized for concurrent access and high-throughput scenarios.
+  - `sled`: A lock-free, log-structured database, excellent for specific workloads.
+- **Data-at-Rest Encryption**: Transparent encryption for both backends using the `age` encryption library.
+- **Key-Value Operations**: Full support for basic string, hash, and list operations.
+- **Expiration**: Time-to-live (TTL) functionality for keys.
+- **Scanning**: Cursor-based iteration for keys and hash fields (`SCAN`, `HSCAN`).
+- **AGE Cryptography Commands**: HeroDB-specific extensions for cryptographic operations.
 
-## Installation
+## Quick Start
+
+### Building HeroDB
+
+To build HeroDB, navigate to the project root and run:
 
 ```bash
 cargo build --release
 ```
 
-## Usage
+### Running HeroDB
+
+You can start HeroDB with different backends and encryption options:
+
+#### Default `redb` Backend
 
 ```bash
-./target/release/herodb --dir /path/to/db --port 6379
+./target/release/herodb --dir /tmp/herodb_redb --port 6379
 ```
 
-## RPC Server
-
-HeroDB includes an optional JSON-RPC 2.0 management server for database administration tasks. Enable it with the `--enable-rpc` flag and specify the port with `--rpc-port` (default: 8080).
-
-For a complete list of available RPC commands and usage examples, see [RPC_COMMANDS.md](RPC_COMMANDS.md).
-
-### Options
-
-- `--dir`: Database directory (required)
-- `--port`: Server port (default: 6379)
-- `--debug`: Enable debug logging
-- `--encrypt`: Enable database encryption
-- `--encryption-key`: Master encryption key for encrypted databases
-- `--enable-rpc`: Enable RPC management server
-- `--rpc-port`: RPC server port (default: 8080)
-
-### Examples
+#### `sled` Backend
 
 ```bash
-# Basic server
-herodb --dir ./data
-
-# Encrypted database
-herodb --dir ./data --encrypt --encryption-key "your-key"
-
-# Custom port with debug
-herodb --dir ./data --port 7000 --debug
+./target/release/herodb --dir /tmp/herodb_sled --port 6379 --sled
 ```
 
-## Redis Commands
-
-Supports standard Redis commands including:
-
-- **Strings**: GET, SET, MGET, MSET, INCR, DEL
-- **Hashes**: HGET, HSET, HGETALL, HDEL, HEXISTS
-- **Lists**: LPUSH, RPUSH, LPOP, RPOP, LLEN, LRANGE
-- **Keys**: KEYS, SCAN, EXISTS, EXPIRE, TTL
-- **Transactions**: MULTI, EXEC, DISCARD
-- **Server**: PING, ECHO, INFO, CONFIG
-
-## AGE Commands
-
-Extended commands for cryptographic operations:
-
-- **Key Generation**: `AGE GENENC`, `AGE GENSIGN`, `AGE KEYGEN`
-- **Encryption**: `AGE ENCRYPT`, `AGE DECRYPT`, `AGE ENCRYPTNAME`
-- **Signing**: `AGE SIGN`, `AGE VERIFY`, `AGE SIGNNAME`
-- **Management**: `AGE LIST`
-
-## Client Usage
-
-Connect using any Redis client:
+#### `redb` with Encryption
 
 ```bash
-redis-cli -p 6379 SET key value
-redis-cli -p 6379 GET key
-redis-cli -p 6379 AGE GENENC
+./target/release/herodb --dir /tmp/herodb_encrypted --port 6379 --encrypt --encryption_key mysecretkey
 ```
 
-## Architecture
+#### `sled` with Encryption
 
-- **Storage**: redb embedded database
-- **Protocol**: Redis RESP protocol over TCP
-- **Encryption**: ChaCha20-Poly1305 for data, AGE for operations
-- **Concurrency**: Tokio async runtime
+```bash
+./target/release/herodb --dir /tmp/herodb_sled_encrypted --port 6379 --sled --encrypt --encryption_key mysecretkey
+```
+
+## Usage with Redis Clients
+
+HeroDB can be interacted with using any standard Redis client, such as `redis-cli`, `redis-py` (Python), or `ioredis` (Node.js).
+
+### Example with `redis-cli`
+
+```bash
+redis-cli -p 6379 SET mykey "Hello from HeroDB!"
+redis-cli -p 6379 GET mykey
+# → "Hello from HeroDB!"
+
+redis-cli -p 6379 HSET user:1 name "Alice" age "30"
+redis-cli -p 6379 HGET user:1 name
+# → "Alice"
+
+redis-cli -p 6379 SCAN 0 MATCH user:* COUNT 10
+# → 1) "0"
+#    2) 1) "user:1"
+```
+
+## Documentation
+
+For more detailed information on commands, features, and advanced usage, please refer to the documentation:
+
+- [Basics](docs/basics.md)
+- [Supported Commands](docs/cmds.md)
+- [AGE Cryptography](docs/age.md)
RPC_COMMANDS.md (deleted, 93 lines)

# HeroDB RPC Commands

HeroDB provides a JSON-RPC 2.0 interface for database management operations. The RPC server runs on a separate port (default 8080) and can be enabled with the `--enable-rpc` flag.

All RPC methods are prefixed with the `herodb` namespace, with the exception of the `rpc.discover` call (in the `rpc` namespace), which returns the OpenRPC spec.
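The spec can be fetched with the same curl pattern used for the commands below (the exact response is whatever OpenRPC document the server publishes):

```bash
curl -X POST http://localhost:8080 \
  -H "Content-Type: application/json" \
  -d '{"jsonrpc": "2.0", "method": "rpc.discover", "id": 1}'
```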
## Available Commands

### herodb_listDatabases
Lists all database indices that exist.

**Example:**
```bash
curl -X POST http://localhost:8080 \
  -H "Content-Type: application/json" \
  -d '{"jsonrpc": "2.0", "method": "herodb_listDatabases", "id": 1}'
```

### herodb_createDatabase
Creates a new database at the specified index.

**Parameters:**
- `db_index` (number): Database index to create

**Example:**
```bash
curl -X POST http://localhost:8080 \
  -H "Content-Type: application/json" \
  -d '{"jsonrpc": "2.0", "method": "herodb_createDatabase", "params": [1], "id": 1}'
```

### herodb_getDatabaseInfo
Retrieves detailed information about a specific database.

**Parameters:**
- `db_index` (number): Database index

**Example:**
```bash
curl -X POST http://localhost:8080 \
  -H "Content-Type: application/json" \
  -d '{"jsonrpc": "2.0", "method": "herodb_getDatabaseInfo", "params": [0], "id": 1}'
```

### herodb_configureDatabase
Configures an existing database with specific settings.

**Parameters:**
- `db_index` (number): Database index
- `config` (object): Configuration object

**Example:**
```bash
curl -X POST http://localhost:8080 \
  -H "Content-Type: application/json" \
  -d '{"jsonrpc": "2.0", "method": "herodb_configureDatabase", "params": [0, {"name": "test", "max_size": 1048576}], "id": 1}'
```

### herodb_setDatabaseEncryption
Sets encryption for a specific database index.

**Parameters:**
- `db_index` (number): Database index
- `encryption_key` (string): Encryption key

**Example:**
```bash
curl -X POST http://localhost:8080 \
  -H "Content-Type: application/json" \
  -d '{"jsonrpc": "2.0", "method": "herodb_setDatabaseEncryption", "params": [10, "my-secret-key"], "id": 1}'
```

### herodb_deleteDatabase
Deletes a database and its files.

**Parameters:**
- `db_index` (number): Database index to delete

**Example:**
```bash
curl -X POST http://localhost:8080 \
  -H "Content-Type: application/json" \
  -d '{"jsonrpc": "2.0", "method": "herodb_deleteDatabase", "params": [1], "id": 1}'
```

### herodb_getServerStats
Retrieves server statistics.

**Example:**
```bash
curl -X POST http://localhost:8080 \
  -H "Content-Type: application/json" \
  -d '{"jsonrpc": "2.0", "method": "herodb_getServerStats", "id": 1}'
```
docs/cmds.md (new file, 125 lines)
## Backend Support

HeroDB supports two storage backends, both with full encryption support:

- **redb** (default): Full-featured, optimized for production use
- **sled**: Alternative embedded database with encryption support

### Starting HeroDB with Different Backends

```bash
# Use default redb backend
./target/release/herodb --dir /tmp/herodb_redb --port 6379

# Use sled backend
./target/release/herodb --dir /tmp/herodb_sled --port 6379 --sled

# Use redb with encryption
./target/release/herodb --dir /tmp/herodb_encrypted --port 6379 --encrypt --key mysecretkey

# Use sled with encryption
./target/release/herodb --dir /tmp/herodb_sled_encrypted --port 6379 --sled --encrypt --key mysecretkey
```

### Command Support by Backend

| Command Category | redb | sled | Notes |
|------------------|------|------|-------|
| **Strings** | | | |
| SET | ✅ | ✅ | Full support |
| GET | ✅ | ✅ | Full support |
| DEL | ✅ | ✅ | Full support |
| EXISTS | ✅ | ✅ | Full support |
| INCR/DECR | ✅ | ✅ | Full support |
| MGET/MSET | ✅ | ✅ | Full support |
| **Hashes** | | | |
| HSET | ✅ | ✅ | Full support |
| HGET | ✅ | ✅ | Full support |
| HGETALL | ✅ | ✅ | Full support |
| HDEL | ✅ | ✅ | Full support |
| HEXISTS | ✅ | ✅ | Full support |
| HKEYS | ✅ | ✅ | Full support |
| HVALS | ✅ | ✅ | Full support |
| HLEN | ✅ | ✅ | Full support |
| HMGET | ✅ | ✅ | Full support |
| HSETNX | ✅ | ✅ | Full support |
| HINCRBY/HINCRBYFLOAT | ✅ | ✅ | Full support |
| HSCAN | ✅ | ✅ | Full support with pattern matching |
| **Lists** | | | |
| LPUSH/RPUSH | ✅ | ✅ | Full support |
| LPOP/RPOP | ✅ | ✅ | Full support |
| LLEN | ✅ | ✅ | Full support |
| LRANGE | ✅ | ✅ | Full support |
| LINDEX | ✅ | ✅ | Full support |
| LTRIM | ✅ | ✅ | Full support |
| LREM | ✅ | ✅ | Full support |
| BLPOP/BRPOP | ✅ | ❌ | Blocking operations not in sled |
| **Expiration** | | | |
| EXPIRE | ✅ | ✅ | Full support in both |
| TTL | ✅ | ✅ | Full support in both |
| PERSIST | ✅ | ✅ | Full support in both |
| SETEX/PSETEX | ✅ | ✅ | Full support in both |
| EXPIREAT/PEXPIREAT | ✅ | ✅ | Full support in both |
| **Scanning** | | | |
| KEYS | ✅ | ✅ | Full support with patterns |
| SCAN | ✅ | ✅ | Full cursor-based iteration |
| HSCAN | ✅ | ✅ | Full cursor-based iteration |
| **Transactions** | | | |
| MULTI/EXEC/DISCARD | ✅ | ❌ | Only supported in redb |
| **Encryption** | | | |
| Data-at-rest encryption | ✅ | ✅ | Both support [age](https://age-encryption.org) encryption |
| AGE commands | ✅ | ✅ | Both support AGE crypto commands |
| **Full-Text Search** | | | |
| FT.CREATE | ✅ | ✅ | Create search index with schema |
| FT.ADD | ✅ | ✅ | Add document to search index |
| FT.SEARCH | ✅ | ✅ | Search documents with query |
| FT.DEL | ✅ | ✅ | Delete document from index |
| FT.INFO | ✅ | ✅ | Get index information |
| FT.DROP | ✅ | ✅ | Drop search index |
| FT.ALTER | ✅ | ✅ | Alter index schema |
| FT.AGGREGATE | ✅ | ✅ | Aggregate search results |
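As the table notes, MULTI/EXEC/DISCARD are redb-only. Because each `redis-cli` invocation opens a fresh connection, a transaction must be issued over a single connection — for example by piping the commands through one session (a sketch against the default redb instance):

```bash
redis-cli -p 6379 <<'EOF'
MULTI
SET balance 100
INCR balance
EXEC
EOF
```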
### Performance Considerations

- **redb**: Optimized for concurrent access, better for high-throughput scenarios
- **sled**: Lock-free architecture, excellent for specific workloads

### Encryption Features

Both backends support:
- Transparent data-at-rest encryption using the `age` encryption library
- Per-database encryption (databases >= 10 are encrypted when the `--encrypt` flag is used)
- Secure key derivation using the master key

### Backend Selection Examples

```bash
# Example: Testing both backends
redis-cli -p 6379 SET mykey "redb value"
redis-cli -p 6381 SET mykey "sled value"

# Example: Using encryption with both
./target/release/herodb --port 6379 --encrypt --key secret123
./target/release/herodb --port 6381 --sled --encrypt --key secret123

# Both support the same Redis commands
redis-cli -p 6379 HSET user:1 name "Alice" age "30"
redis-cli -p 6381 HSET user:1 name "Alice" age "30"

# Both support SCAN operations
redis-cli -p 6379 SCAN 0 MATCH user:* COUNT 10
redis-cli -p 6381 SCAN 0 MATCH user:* COUNT 10
```

### Migration Between Backends

To migrate data between backends, use Redis replication or dump/restore:

```bash
# Export from redb
redis-cli -p 6379 --rdb dump.rdb

# Import to sled
redis-cli -p 6381 --pipe < dump.rdb
```
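Note that `redis-cli --pipe` expects a stream of RESP commands rather than RDB binary, so replaying commands key-by-key can be the more dependable route; a minimal sketch for string keys only (hashes and lists would need their own loops):

```bash
# Copy every string value from the redb instance (6379) to the sled instance (6381).
redis-cli -p 6379 --scan | while read -r key; do
  value=$(redis-cli -p 6379 GET "$key")
  redis-cli -p 6381 SET "$key" "$value"
done
```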
docs/search.md (new file, 397 lines)
# Full-Text Search with Tantivy

HeroDB includes powerful full-text search capabilities powered by [Tantivy](https://github.com/quickwit-oss/tantivy), a fast full-text search engine library written in Rust. This provides Redis-compatible search commands similar to RediSearch.

## Overview

The search functionality allows you to:
- Create search indexes with custom schemas
- Index documents with multiple field types
- Perform complex queries with filters
- Work with text, numeric, date, and geographic data
- Search in real time with high performance

## Search Commands

### FT.CREATE - Create Search Index

Create a new search index with a defined schema.

```bash
FT.CREATE index_name SCHEMA field_name field_type [options] [field_name field_type [options] ...]
```

**Field Types:**
- `TEXT` - Full-text searchable text fields
- `NUMERIC` - Numeric fields (integers, floats)
- `TAG` - Tag fields for exact matching
- `GEO` - Geographic coordinates (lat,lon)
- `DATE` - Date/timestamp fields

**Field Options:**
- `STORED` - Store field value for retrieval
- `INDEXED` - Make field searchable
- `TOKENIZED` - Enable tokenization for text fields
- `FAST` - Enable fast access for numeric fields

**Example:**
```bash
# Create a product search index
FT.CREATE products SCHEMA
  title TEXT STORED INDEXED TOKENIZED
  description TEXT STORED INDEXED TOKENIZED
  price NUMERIC STORED INDEXED FAST
  category TAG STORED
  location GEO STORED
  created_date DATE STORED INDEXED
```

### FT.ADD - Add Document to Index

Add a document to a search index.

```bash
FT.ADD index_name doc_id [SCORE score] FIELDS field_name field_value [field_name field_value ...]
```

**Example:**
```bash
# Add a product document
FT.ADD products product:1 SCORE 1.0 FIELDS
  title "Wireless Headphones"
  description "High-quality wireless headphones with noise cancellation"
  price 199.99
  category "electronics"
  location "37.7749,-122.4194"
  created_date 1640995200000
```

### FT.SEARCH - Search Documents

Search for documents in an index.

```bash
FT.SEARCH index_name query [LIMIT offset count] [FILTER field min max] [RETURN field [field ...]]
```

**Query Syntax:**
- Simple terms: `wireless headphones`
- Phrase queries: `"noise cancellation"`
- Field-specific: `title:wireless`
- Boolean operators: `wireless AND headphones`
- Wildcards: `head*`

**Examples:**
```bash
# Simple text search
FT.SEARCH products "wireless headphones"

# Search with filters
FT.SEARCH products "headphones" FILTER price 100 300 LIMIT 0 10

# Field-specific search
FT.SEARCH products "title:wireless AND category:electronics"

# Return specific fields only
FT.SEARCH products "*" RETURN title price
```

### FT.DEL - Delete Document

Remove a document from the search index.

```bash
FT.DEL index_name doc_id
```

**Example:**
```bash
FT.DEL products product:1
```

### FT.INFO - Get Index Information

Get information about a search index.

```bash
FT.INFO index_name
```

**Returns:**
- Index name and document count
- Field definitions and types
- Index configuration

**Example:**
```bash
FT.INFO products
```

### FT.DROP - Drop Index

Delete an entire search index.

```bash
FT.DROP index_name
```

**Example:**
```bash
FT.DROP products
```

### FT.ALTER - Alter Index Schema

Add new fields to an existing index.

```bash
FT.ALTER index_name SCHEMA ADD field_name field_type [options]
```

**Example:**
```bash
FT.ALTER products SCHEMA ADD brand TAG STORED
```

### FT.AGGREGATE - Aggregate Search Results

Perform aggregations on search results.

```bash
FT.AGGREGATE index_name query [GROUPBY field] [REDUCE function field AS alias]
```

**Example:**
```bash
# Group products by category and count
FT.AGGREGATE products "*" GROUPBY category REDUCE COUNT 0 AS count
```

## Field Types in Detail

### TEXT Fields
- **Purpose**: Full-text search on natural language content
- **Features**: Tokenization, stemming, stop-word removal
- **Options**: `STORED`, `INDEXED`, `TOKENIZED`
- **Example**: Product titles, descriptions, content

### NUMERIC Fields
- **Purpose**: Numeric data for range queries and sorting
- **Types**: I64, U64, F64
- **Options**: `STORED`, `INDEXED`, `FAST`
- **Example**: Prices, quantities, ratings

### TAG Fields
- **Purpose**: Exact-match categorical data
- **Features**: No tokenization, exact string matching
- **Options**: `STORED`, case sensitivity control
- **Example**: Categories, brands, status values

### GEO Fields
- **Purpose**: Geographic coordinates
- **Format**: "latitude,longitude" (e.g., "37.7749,-122.4194")
- **Features**: Geographic distance queries
- **Options**: `STORED`

### DATE Fields
- **Purpose**: Timestamp and date data
- **Format**: Unix timestamp in milliseconds
- **Features**: Range queries, temporal filtering
- **Options**: `STORED`, `INDEXED`, `FAST`

## Search Query Syntax

### Basic Queries
```bash
# Single term
FT.SEARCH products "wireless"

# Multiple terms (AND by default)
FT.SEARCH products "wireless headphones"

# Phrase query
FT.SEARCH products "\"noise cancellation\""
```

### Field-Specific Queries
```bash
# Search in specific field
FT.SEARCH products "title:wireless"

# Multiple field queries
FT.SEARCH products "title:wireless AND description:bluetooth"
```

### Boolean Operators
```bash
# AND operator
FT.SEARCH products "wireless AND headphones"

# OR operator
FT.SEARCH products "wireless OR bluetooth"

# NOT operator
FT.SEARCH products "headphones NOT wired"
```

### Wildcards and Fuzzy Search
```bash
# Wildcard search
FT.SEARCH products "head*"

# Fuzzy search (approximate matching)
FT.SEARCH products "%headphone%"
```

### Range Queries
```bash
# Numeric range in query
FT.SEARCH products "@price:[100 300]"

# Date range
FT.SEARCH products "@created_date:[1640995200000 1672531200000]"
```

## Filtering and Sorting

### FILTER Clause
```bash
# Numeric filter
FT.SEARCH products "headphones" FILTER price 100 300

# Multiple filters
FT.SEARCH products "*" FILTER price 100 500 FILTER rating 4 5
```

### LIMIT Clause
```bash
# Pagination
FT.SEARCH products "wireless" LIMIT 0 10   # First 10 results
FT.SEARCH products "wireless" LIMIT 10 10  # Next 10 results
```

### RETURN Clause
```bash
# Return specific fields
FT.SEARCH products "*" RETURN title price

# Return all stored fields (default)
FT.SEARCH products "*"
```

## Performance Considerations

### Indexing Strategy
- Only index fields you need to search on
- Use the `FAST` option for frequently filtered numeric fields
- Consider storage vs. search performance trade-offs

### Query Optimization
- Use specific field queries when possible
- Combine filters with text queries for better performance
- Use pagination with LIMIT for large result sets

### Memory Usage
- Tantivy indexes are memory-mapped for performance
- Index size depends on document count and field configuration
- Monitor disk space for index storage

## Integration with Redis Commands

Search indexes work alongside regular Redis data:

```bash
# Store product data in Redis hash
HSET product:1 title "Wireless Headphones" price "199.99"

# Index the same data for search
FT.ADD products product:1 FIELDS title "Wireless Headphones" price 199.99

# Search returns document IDs that can be used with Redis commands
FT.SEARCH products "wireless"
# Returns: product:1

# Retrieve full data using Redis
HGETALL product:1
```

## Example Use Cases

### E-commerce Product Search
```bash
# Create product catalog index
FT.CREATE catalog SCHEMA
  name TEXT STORED INDEXED TOKENIZED
  description TEXT INDEXED TOKENIZED
  price NUMERIC STORED INDEXED FAST
  category TAG STORED
  brand TAG STORED
  rating NUMERIC STORED FAST

# Add products
FT.ADD catalog prod:1 FIELDS name "iPhone 14" price 999 category "phones" brand "apple" rating 4.5
FT.ADD catalog prod:2 FIELDS name "Samsung Galaxy" price 899 category "phones" brand "samsung" rating 4.3

# Search queries
FT.SEARCH catalog "iPhone"
FT.SEARCH catalog "phones" FILTER price 800 1000
FT.SEARCH catalog "@brand:apple"
```

### Content Management
```bash
# Create content index
FT.CREATE content SCHEMA
  title TEXT STORED INDEXED TOKENIZED
  body TEXT INDEXED TOKENIZED
  author TAG STORED
  published DATE STORED INDEXED
  tags TAG STORED

# Search content
FT.SEARCH content "machine learning"
FT.SEARCH content "@author:john AND @tags:ai"
FT.SEARCH content "*" FILTER published 1640995200000 1672531200000
```

### Geographic Search
```bash
# Create location-based index
FT.CREATE places SCHEMA
  name TEXT STORED INDEXED TOKENIZED
  location GEO STORED
  type TAG STORED

# Add locations
FT.ADD places place:1 FIELDS name "Golden Gate Bridge" location "37.8199,-122.4783" type "landmark"

# Geographic queries (future feature)
FT.SEARCH places "@location:[37.7749 -122.4194 10 km]"
```

## Error Handling

Common error responses:
- `ERR index not found` - Index doesn't exist
- `ERR field not found` - Field not defined in schema
- `ERR invalid query syntax` - Malformed query
- `ERR document not found` - Document ID doesn't exist

## Best Practices

1. **Schema Design**: Plan your schema carefully - changes require reindexing
2. **Field Selection**: Only store and index fields you actually need
3. **Batch Operations**: Add multiple documents efficiently (see the sketch after this list)
4. **Query Testing**: Test queries for performance with realistic data
5. **Monitoring**: Monitor index size and query performance
6. **Backup**: Include search indexes in backup strategies
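For item 3, one simple way to batch is to stream many FT.ADD commands through a single connection instead of invoking the client once per document — a sketch, assuming a tab-separated `docs.tsv` of id/title pairs (hypothetical input file) and the `products` index from earlier:

```bash
# Emit one FT.ADD per input row and pipe them all through one redis-cli session.
while IFS=$'\t' read -r id title; do
  printf 'FT.ADD products %s FIELDS title "%s"\n' "$id" "$title"
done < docs.tsv | redis-cli -p 6379
```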
## Future Enhancements

Planned features:
- Geographic distance queries
- Advanced aggregations and faceting
- Highlighting of search results
- Synonyms and custom analyzers
- Real-time suggestions and autocomplete
- Index replication and sharding
examples/README.md (new file, 171 lines)
# HeroDB Tantivy Search Examples

This directory contains examples demonstrating HeroDB's full-text search capabilities powered by Tantivy.

## Tantivy Search Demo (Bash Script)

### Overview
The `tantivy_search_demo.sh` script provides a comprehensive demonstration of HeroDB's search functionality using Redis commands. It showcases various search scenarios including basic text search, filtering, sorting, geographic queries, and more.

### Prerequisites
1. **HeroDB Server**: The server must be running on port 6381
2. **Redis CLI**: The `redis-cli` tool must be installed and available in your PATH

### Running the Demo

#### Step 1: Start HeroDB Server
```bash
# From the project root directory
cargo run -- --port 6381
```

#### Step 2: Run the Demo (in a new terminal)
```bash
# From the project root directory
./examples/tantivy_search_demo.sh
```

### What the Demo Covers

The script demonstrates 15 different search scenarios:

1. **Index Creation** - Creating a search index with various field types
2. **Data Insertion** - Adding sample products to the index
3. **Basic Text Search** - Simple keyword searches
4. **Filtered Search** - Combining text search with category filters
5. **Numeric Range Search** - Finding products within price ranges
6. **Sorting Results** - Ordering results by different fields
7. **Limited Results** - Pagination and result limiting
8. **Complex Queries** - Multi-field searches with sorting
9. **Geographic Search** - Location-based queries
10. **Index Information** - Getting statistics about the search index
11. **Search Comparison** - Tantivy vs simple pattern matching
12. **Fuzzy Search** - Typo tolerance and approximate matching
13. **Phrase Search** - Exact phrase matching
14. **Boolean Queries** - AND, OR, NOT operators
15. **Cleanup** - Removing test data

### Sample Data

The demo uses a product catalog with the following fields:
- **title** (TEXT) - Product name with higher search weight
- **description** (TEXT) - Detailed product description
- **category** (TAG) - Comma-separated categories
- **price** (NUMERIC) - Product price for range queries
- **rating** (NUMERIC) - Customer rating for sorting
- **location** (GEO) - Geographic coordinates for location searches

### Key Redis Commands Demonstrated

#### Index Management
```bash
# Create search index
FT.CREATE product_catalog ON HASH PREFIX 1 product: SCHEMA title TEXT WEIGHT 2.0 SORTABLE description TEXT category TAG SEPARATOR , price NUMERIC SORTABLE rating NUMERIC SORTABLE location GEO

# Get index information
FT.INFO product_catalog

# Drop index
FT.DROPINDEX product_catalog
```

#### Search Queries
```bash
# Basic text search
FT.SEARCH product_catalog wireless

# Filtered search
FT.SEARCH product_catalog 'organic @category:{food}'

# Numeric range
FT.SEARCH product_catalog '@price:[50 150]'

# Sorted results
FT.SEARCH product_catalog '@category:{electronics}' SORTBY price ASC

# Geographic search
FT.SEARCH product_catalog '@location:[37.7749 -122.4194 50 km]'

# Boolean queries
FT.SEARCH product_catalog 'wireless AND audio'
FT.SEARCH product_catalog 'coffee OR tea'

# Phrase search
FT.SEARCH product_catalog '"noise canceling"'
```

### Interactive Features

The demo script includes:
- **Colored output** for better readability
- **Pause between steps** to review results
- **Error handling** with clear error messages
- **Automatic cleanup** of test data
- **Progress indicators** showing what each step demonstrates

### Troubleshooting

#### HeroDB Not Running
```
✗ HeroDB is not running on port 6381
ℹ Please start HeroDB with: cargo run -- --port 6381
```
**Solution**: Start the HeroDB server in a separate terminal.

#### Redis CLI Not Found
```
redis-cli: command not found
```
**Solution**: Install Redis tools or use an alternative Redis client.

#### Connection Refused
```
Could not connect to Redis at localhost:6381: Connection refused
```
**Solution**: Ensure HeroDB is running and listening on the correct port.

### Manual Testing

You can also run individual commands manually:

```bash
# Connect to HeroDB
redis-cli -h localhost -p 6381

# Create a simple index
FT.CREATE myindex ON HASH SCHEMA title TEXT description TEXT

# Add a document
HSET doc:1 title "Hello World" description "This is a test document"

# Search
FT.SEARCH myindex hello
```
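The same sequence can also be run non-interactively by piping it through a single `redis-cli` session:

```bash
redis-cli -h localhost -p 6381 <<'EOF'
FT.CREATE myindex ON HASH SCHEMA title TEXT description TEXT
HSET doc:1 title "Hello World" description "This is a test document"
FT.SEARCH myindex hello
EOF
```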
### Performance Notes

- **Indexing**: Documents are indexed in real-time as they're added
- **Search Speed**: Full-text search is much faster than pattern matching on large datasets
- **Memory Usage**: Tantivy indexes are memory-efficient and disk-backed
- **Scalability**: Supports millions of documents with sub-second search times

### Advanced Features

The demo showcases advanced Tantivy features:
- **Relevance Scoring** - Results ranked by relevance
- **Fuzzy Matching** - Handles typos and approximate matches
- **Field Weighting** - Title field has higher search weight
- **Multi-field Search** - Search across multiple fields simultaneously
- **Geographic Queries** - Distance-based location searches
- **Numeric Ranges** - Efficient range queries on numeric fields
- **Tag Filtering** - Fast categorical filtering

### Next Steps

After running the demo, explore:
1. **Custom Schemas** - Define your own field types and configurations
2. **Large Datasets** - Test with thousands or millions of documents
3. **Real Applications** - Integrate search into your applications
4. **Performance Tuning** - Optimize for your specific use case

For more information, see the [search documentation](../docs/search.md).
examples/simple_demo.sh (new file, 186 lines)
#!/bin/bash

# Simple HeroDB Demo - Basic Redis Commands
# This script demonstrates basic Redis functionality that's currently implemented

set -e  # Exit on any error

# Configuration
REDIS_HOST="localhost"
REDIS_PORT="6381"
REDIS_CLI="redis-cli -h $REDIS_HOST -p $REDIS_PORT"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Function to print colored output
print_header() {
    echo -e "${BLUE}=== $1 ===${NC}"
}

print_success() {
    echo -e "${GREEN}✓ $1${NC}"
}

print_info() {
    echo -e "${YELLOW}ℹ $1${NC}"
}

print_error() {
    echo -e "${RED}✗ $1${NC}"
}

# Function to check if HeroDB is running
check_herodb() {
    print_info "Checking if HeroDB is running on port $REDIS_PORT..."
    if ! $REDIS_CLI ping > /dev/null 2>&1; then
        print_error "HeroDB is not running on port $REDIS_PORT"
        print_info "Please start HeroDB with: cargo run -- --port $REDIS_PORT"
        exit 1
    fi
    print_success "HeroDB is running and responding"
}

# Function to execute Redis command with error handling
execute_cmd() {
    local cmd="$1"
    local description="$2"

    echo -e "${YELLOW}Command:${NC} $cmd"
    # eval is needed so quoted arguments inside $cmd (e.g. 'Hello HeroDB!')
    # reach redis-cli as single arguments instead of being word-split.
    if result=$(eval "$REDIS_CLI $cmd" 2>&1); then
        echo -e "${GREEN}Result:${NC} $result"
        return 0
    else
        print_error "Failed: $description"
        echo "Error: $result"
        return 1
    fi
}

# Main demo function
main() {
    clear
    print_header "HeroDB Basic Functionality Demo"
    echo "This demo shows basic Redis commands that are currently implemented"
    echo "HeroDB runs on port $REDIS_PORT (instead of Redis default 6379)"
    echo

    # Check if HeroDB is running
    check_herodb
    echo

    print_header "Step 1: Basic Key-Value Operations"

    execute_cmd "SET greeting 'Hello HeroDB!'" "Setting a simple key-value pair"
    echo
    execute_cmd "GET greeting" "Getting the value"
    echo
    execute_cmd "SET counter 42" "Setting a numeric value"
    echo
    execute_cmd "INCR counter" "Incrementing the counter"
    echo
    execute_cmd "GET counter" "Getting the incremented value"
    echo

    print_header "Step 2: Hash Operations"

    execute_cmd "HSET user:1 name 'John Doe' email 'john@example.com' age 30" "Setting hash fields"
    echo
    execute_cmd "HGET user:1 name" "Getting a specific field"
    echo
    execute_cmd "HGETALL user:1" "Getting all fields"
    echo
    execute_cmd "HLEN user:1" "Getting hash length"
    echo

    print_header "Step 3: List Operations"

    execute_cmd "LPUSH tasks 'Write code' 'Test code' 'Deploy code'" "Adding items to list"
    echo
    execute_cmd "LLEN tasks" "Getting list length"
    echo
    execute_cmd "LRANGE tasks 0 -1" "Getting all list items"
    echo
    execute_cmd "LPOP tasks" "Popping from left"
    echo
    execute_cmd "LRANGE tasks 0 -1" "Checking remaining items"
    echo

    print_header "Step 4: Key Management"

    execute_cmd "KEYS *" "Listing all keys"
    echo
    execute_cmd "EXISTS greeting" "Checking if key exists"
    echo
    execute_cmd "TYPE user:1" "Getting key type"
    echo
    execute_cmd "DBSIZE" "Getting database size"
    echo

    print_header "Step 5: Expiration"

    execute_cmd "SET temp_key 'temporary value'" "Setting temporary key"
    echo
    execute_cmd "EXPIRE temp_key 5" "Setting 5 second expiration"
    echo
    execute_cmd "TTL temp_key" "Checking time to live"
    echo
    print_info "Waiting 2 seconds..."
    sleep 2
    execute_cmd "TTL temp_key" "Checking TTL again"
    echo

    print_header "Step 6: Multiple Operations"

    execute_cmd "MSET key1 'value1' key2 'value2' key3 'value3'" "Setting multiple keys"
    echo
    execute_cmd "MGET key1 key2 key3" "Getting multiple values"
    echo
    execute_cmd "DEL key1 key2" "Deleting multiple keys"
    echo
    execute_cmd "EXISTS key1 key2 key3" "Checking existence of multiple keys"
    echo

    print_header "Step 7: Search Commands (Placeholder)"
    print_info "Testing FT.CREATE command (currently returns placeholder response)"

    execute_cmd "FT.CREATE test_index SCHEMA title TEXT description TEXT" "Creating search index"
    echo

    print_header "Step 8: Server Information"

    execute_cmd "INFO" "Getting server information"
    echo
    execute_cmd "CONFIG GET dir" "Getting configuration"
    echo

    print_header "Step 9: Cleanup"

    execute_cmd "FLUSHDB" "Clearing database"
    echo
    execute_cmd "DBSIZE" "Confirming database is empty"
    echo

    print_header "Demo Summary"
    echo "This demonstration showed:"
    echo "• Basic key-value operations (GET, SET, INCR)"
    echo "• Hash operations (HSET, HGET, HGETALL)"
    echo "• List operations (LPUSH, LPOP, LRANGE)"
    echo "• Key management (KEYS, EXISTS, TYPE, DEL)"
    echo "• Expiration handling (EXPIRE, TTL)"
    echo "• Multiple key operations (MSET, MGET)"
    echo "• Server information commands"
    echo
    print_success "HeroDB basic functionality demo completed successfully!"
    echo
    print_info "Note: Full-text search (FT.*) commands are defined but not yet fully implemented"
    print_info "To run HeroDB server: cargo run -- --port 6381"
    print_info "To connect with redis-cli: redis-cli -h localhost -p 6381"
}

# Run the demo
main "$@"
herodb/Cargo.toml (deleted, 29 lines)

[package]
name = "herodb"
version = "0.0.1"
authors = ["ThreeFold Tech"]
edition = "2024"

[dependencies]
anyhow = "1.0.59"
bytes = "1.3.0"
thiserror = "1.0.32"
tokio = { version = "1.23.0", features = ["full"] }
clap = { version = "4.5.20", features = ["derive"] }
byteorder = "1.4.3"
futures = "0.3"
redb = "2.1.3"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
bincode = "1.3.3"
chacha20poly1305 = "0.10.1"
rand = "0.8"
sha2 = "0.10"
age = "0.10"
secrecy = "0.8"
ed25519-dalek = "2"
base64 = "0.22"
jsonrpsee = { version = "0.26", features = ["http-client", "ws-client", "server", "macros"] }

[dev-dependencies]
redis = { version = "0.24", features = ["aio", "tokio-comp"] }
@@ -1,227 +0,0 @@

# HeroDB Redis Protocol Support: Commands & Client Usage

HeroDB is a Redis-compatible database built on the `redb` storage backend.

It supports a subset of Redis commands over the standard RESP (Redis Serialization Protocol) via TCP, so you can interact with it using standard Redis clients such as `redis-cli`, Python's `redis-py`, or Node.js's `ioredis`.

This document provides:
- A list of all currently supported Redis commands.
- Example usage with standard Redis clients.
- Bash and Rust test-inspired usage examples.

## Quick Start

Assuming the server is running on localhost at port 6381:

```bash
# Build HeroDB
cargo build --release

# Start HeroDB server
./target/release/herodb --dir /tmp/herodb_data --port 6381 --debug
```

## Using Standard Redis Clients

### With `redis-cli`

```bash
redis-cli -p 6381 SET mykey "hello"
redis-cli -p 6381 GET mykey
```

### With Python (`redis-py`)

```python
import redis

r = redis.Redis(host='localhost', port=6381, db=0)
r.set('mykey', 'hello')
print(r.get('mykey').decode())
```

### With Node.js (`ioredis`)

```js
const Redis = require("ioredis");
const redis = new Redis({ port: 6381, host: "localhost" });

await redis.set("mykey", "hello");
const value = await redis.get("mykey");
console.log(value); // "hello"
```

## Supported Redis Commands

### String Commands

| Command  | Description                          | Example Usage      |
|----------|--------------------------------------|--------------------|
| `SET`    | Set a key to a string value          | `SET name "Alice"` |
| `GET`    | Get the value of a key               | `GET name`         |
| `DEL`    | Delete one or more keys              | `DEL name age`     |
| `INCR`   | Increment the integer value of a key | `INCR counter`     |
| `DECR`   | Decrement the integer value of a key | `DECR counter`     |
| `INCRBY` | Increment key by a given integer     | `INCRBY counter 5` |
| `DECRBY` | Decrement key by a given integer     | `DECRBY counter 3` |
| `EXISTS` | Check if a key exists                | `EXISTS name`      |
| `TYPE`   | Return the type of a key             | `TYPE name`        |

### Hash Commands

| Command   | Description                         | Example Usage                           |
|-----------|-------------------------------------|-----------------------------------------|
| `HSET`    | Set field in hash stored at key     | `HSET user:1 name "Alice"`              |
| `HGET`    | Get value of a field in hash        | `HGET user:1 name`                      |
| `HGETALL` | Get all fields and values in a hash | `HGETALL user:1`                        |
| `HDEL`    | Delete one or more fields from hash | `HDEL user:1 name age`                  |
| `HEXISTS` | Check if field exists in hash       | `HEXISTS user:1 name`                   |
| `HKEYS`   | Get all field names in a hash       | `HKEYS user:1`                          |
| `HVALS`   | Get all values in a hash            | `HVALS user:1`                          |
| `HLEN`    | Get number of fields in a hash      | `HLEN user:1`                           |
| `HMGET`   | Get values of multiple fields       | `HMGET user:1 name age`                 |
| `HSETNX`  | Set field only if it does not exist | `HSETNX user:1 email alice@example.com` |

### List Commands

| Command  | Description                            | Example Usage                  |
|----------|----------------------------------------|--------------------------------|
| `LPUSH`  | Insert elements at the head of a list  | `LPUSH mylist "item1" "item2"` |
| `RPUSH`  | Insert elements at the tail of a list  | `RPUSH mylist "item3" "item4"` |
| `LPOP`   | Remove and return element from head    | `LPOP mylist`                  |
| `RPOP`   | Remove and return element from tail    | `RPOP mylist`                  |
| `BLPOP`  | Blocking remove from head with timeout | `BLPOP mylist1 mylist2 5`      |
| `BRPOP`  | Blocking remove from tail with timeout | `BRPOP mylist1 mylist2 5`      |
| `LLEN`   | Get the length of a list               | `LLEN mylist`                  |
| `LREM`   | Remove elements from list              | `LREM mylist 2 "item"`         |
| `LTRIM`  | Trim list to specified range           | `LTRIM mylist 0 5`             |
| `LINDEX` | Get element by index                   | `LINDEX mylist 0`              |
| `LRANGE` | Get range of elements                  | `LRANGE mylist 0 -1`           |

### Keys & Scanning

| Command | Description                      | Example Usage                  |
|---------|----------------------------------|--------------------------------|
| `KEYS`  | Find all keys matching a pattern | `KEYS user:*`                  |
| `SCAN`  | Incrementally iterate keys       | `SCAN 0 MATCH user:* COUNT 10` |

### Expiration

| Command   | Description                         | Example Usage       |
|-----------|-------------------------------------|---------------------|
| `EXPIRE`  | Set a key's time to live in seconds | `EXPIRE tempkey 60` |
| `TTL`     | Get the time to live for a key      | `TTL tempkey`       |
| `PERSIST` | Remove the expiration from a key    | `PERSIST tempkey`   |

### Transactions

| Command   | Description                           | Example Usage |
|-----------|---------------------------------------|---------------|
| `MULTI`   | Start a transaction block             | `MULTI`       |
| `EXEC`    | Execute all commands in a transaction | `EXEC`        |
| `DISCARD` | Discard all commands in a transaction | `DISCARD`     |

### Configuration

| Command      | Description                  | Example Usage                |
|--------------|------------------------------|------------------------------|
| `CONFIG GET` | Get configuration parameters | `CONFIG GET dir`             |
| `CONFIG SET` | Set configuration parameters | `CONFIG SET maxmemory 100mb` |

### Info & Monitoring

| Command | Description                                     | Example Usage |
|---------|-------------------------------------------------|---------------|
| `INFO`  | Get information and statistics about the server | `INFO`        |
| `PING`  | Ping the server                                 | `PING`        |

### AGE Cryptography Commands

| Command            | Description                               | Example Usage                           |
|--------------------|-------------------------------------------|-----------------------------------------|
| `AGE GENENC`       | Generate ephemeral encryption keypair     | `AGE GENENC`                            |
| `AGE GENSIGN`      | Generate ephemeral signing keypair        | `AGE GENSIGN`                           |
| `AGE ENCRYPT`      | Encrypt a message using a public key      | `AGE ENCRYPT <recipient> "msg"`         |
| `AGE DECRYPT`      | Decrypt a message using a secret key      | `AGE DECRYPT <identity> <ciphertext>`   |
| `AGE SIGN`         | Sign a message using a secret key         | `AGE SIGN <sign_secret> "msg"`          |
| `AGE VERIFY`       | Verify a signature using a public key     | `AGE VERIFY <pubkey> "msg" <signature>` |
| `AGE KEYGEN`       | Create and persist a named encryption key | `AGE KEYGEN app1`                       |
| `AGE SIGNKEYGEN`   | Create and persist a named signing key    | `AGE SIGNKEYGEN app1`                   |
| `AGE ENCRYPTNAME`  | Encrypt using a named key                 | `AGE ENCRYPTNAME app1 "msg"`            |
| `AGE DECRYPTNAME`  | Decrypt using a named key                 | `AGE DECRYPTNAME app1 <ciphertext>`     |
| `AGE SIGNNAME`     | Sign using a named key                    | `AGE SIGNNAME app1 "msg"`               |
| `AGE VERIFYNAME`   | Verify using a named key                  | `AGE VERIFYNAME app1 "msg" <signature>` |
| `AGE LIST`         | List all persisted named keys             | `AGE LIST`                              |

> Note: AGE commands are not part of standard Redis. They are HeroDB-specific extensions for cryptographic operations.

## Example Usage

### Basic String Operations

```bash
redis-cli -p 6381 SET greeting "Hello, HeroDB!"
redis-cli -p 6381 GET greeting
# → "Hello, HeroDB!"

redis-cli -p 6381 INCR visits
redis-cli -p 6381 INCR visits
redis-cli -p 6381 GET visits
# → "2"
```

### Hash Operations

```bash
redis-cli -p 6381 HSET user:1000 name "Alice" age "30" city "NYC"
redis-cli -p 6381 HGET user:1000 name
# → "Alice"

redis-cli -p 6381 HGETALL user:1000
# → 1) "name"
#    2) "Alice"
#    3) "age"
#    4) "30"
#    5) "city"
#    6) "NYC"
```

### Expiration

```bash
redis-cli -p 6381 SET tempkey "temporary"
redis-cli -p 6381 EXPIRE tempkey 5
redis-cli -p 6381 TTL tempkey
# → (integer) 4

# After 5 seconds:
redis-cli -p 6381 GET tempkey
# → (nil)
```

### Transactions

MULTI/EXEC state lives on a single connection, so the whole transaction must run in one redis-cli session rather than as separate invocations:

```bash
redis-cli -p 6381 <<'EOF'
MULTI
SET txkey1 "value1"
SET txkey2 "value2"
INCR counter
EXEC
EOF
# EXEC returns:
# → 1) OK
#    2) OK
#    3) (integer) 3
```

### Scanning Keys

```bash
redis-cli -p 6381 SET scankey1 "val1"
redis-cli -p 6381 SET scankey2 "val2"
redis-cli -p 6381 HSET scanhash field1 "val1"

redis-cli -p 6381 SCAN 0 MATCH scankey*
# → 1) "0"
#    2) 1) "scankey1"
#       2) "scankey2"
```
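### AGE Operations

An illustrative session with the HeroDB-specific AGE extension (a sketch: the key, ciphertext, and signature strings are placeholders, since real values are generated per key):

```bash
redis-cli -p 6381 AGE KEYGEN app1
# → recipient (public) and identity (secret) strings for the named key "app1"

redis-cli -p 6381 AGE ENCRYPTNAME app1 "secret message"
# → <ciphertext>

redis-cli -p 6381 AGE DECRYPTNAME app1 <ciphertext>
# → "secret message"

redis-cli -p 6381 AGE LIST
# → names of all persisted keys, e.g. "app1"
```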
@@ -1,290 +0,0 @@
{
  "openrpc": "1.2.6",
  "info": {
    "title": "HeroDB RPC API",
    "version": "0.0.1",
    "description": "Database management API for HeroDB"
  },
  "servers": [
    {
      "name": "HeroDB Server",
      "url": "http://localhost:8080"
    }
  ],
  "methods": [
    {
      "name": "herodb_configureDatabase",
      "summary": "Configure an existing database with specific settings",
      "params": [
        {
          "name": "db_index",
          "description": "Database index to configure",
          "schema": { "type": "integer", "minimum": 0 },
          "required": true
        },
        {
          "name": "config",
          "description": "Configuration object",
          "schema": {
            "type": "object",
            "properties": {
              "name": { "type": "string" },
              "storage_path": { "type": "string" },
              "max_size": { "type": "integer" },
              "redis_version": { "type": "string" }
            }
          },
          "required": true
        }
      ],
      "result": {
        "name": "success",
        "schema": { "type": "boolean" }
      }
    },
    {
      "name": "herodb_createDatabase",
      "summary": "Create/pre-initialize a database at the specified index",
      "params": [
        {
          "name": "db_index",
          "description": "Database index to create",
          "schema": { "type": "integer", "minimum": 0 },
          "required": true
        }
      ],
      "result": {
        "name": "success",
        "schema": { "type": "boolean" }
      }
    },
    {
      "name": "herodb_setDatabaseEncryption",
      "summary": "Set encryption for a specific database index",
      "params": [
        {
          "name": "db_index",
          "description": "Database index",
          "schema": { "type": "integer", "minimum": 0 },
          "required": true
        },
        {
          "name": "encryption_key",
          "description": "Encryption key (write-only)",
          "schema": { "type": "string" },
          "required": true
        }
      ],
      "result": {
        "name": "success",
        "schema": { "type": "boolean" }
      }
    },
    {
      "name": "herodb_listDatabases",
      "summary": "List all database indices that exist",
      "params": [],
      "result": {
        "name": "database_indices",
        "schema": {
          "type": "array",
          "items": { "type": "integer" }
        }
      }
    },
    {
      "name": "herodb_getDatabaseInfo",
      "summary": "Get detailed information about a specific database",
      "params": [
        {
          "name": "db_index",
          "description": "Database index",
          "schema": { "type": "integer", "minimum": 0 },
          "required": true
        }
      ],
      "result": {
        "name": "database_info",
        "schema": {
          "type": "object",
          "properties": {
            "id": { "type": "integer" },
            "name": { "type": "string", "nullable": true },
            "backend": { "type": "string", "enum": ["Redb"] },
            "encrypted": { "type": "boolean" },
            "redis_version": { "type": "string", "nullable": true },
            "storage_path": { "type": "string", "nullable": true },
            "size_on_disk": { "type": "integer", "nullable": true },
            "key_count": { "type": "integer", "nullable": true },
            "created_at": { "type": "integer" },
            "last_access": { "type": "integer", "nullable": true }
          }
        }
      }
    },
    {
      "name": "herodb_deleteDatabase",
      "summary": "Delete a database and its files",
      "params": [
        {
          "name": "db_index",
          "description": "Database index to delete",
          "schema": { "type": "integer", "minimum": 0 },
          "required": true
        }
      ],
      "result": {
        "name": "success",
        "schema": { "type": "boolean" }
      }
    },
    {
      "name": "herodb_getServerStats",
      "summary": "Get server statistics",
      "params": [],
      "result": {
        "name": "stats",
        "schema": {
          "type": "object",
          "additionalProperties": {
            "oneOf": [
              { "type": "string" },
              { "type": "integer" },
              { "type": "boolean" },
              { "type": "array" }
            ]
          }
        }
      }
    }
  ],
  "components": {
    "schemas": {
      "DatabaseConfig": {
        "type": "object",
        "properties": {
          "name": { "type": "string", "nullable": true },
          "storage_path": { "type": "string", "nullable": true },
          "max_size": { "type": "integer", "nullable": true },
          "redis_version": { "type": "string", "nullable": true }
        }
      },
      "DatabaseInfo": {
        "type": "object",
        "properties": {
          "id": { "type": "integer" },
          "name": { "type": "string", "nullable": true },
          "backend": { "type": "string", "enum": ["Redb", "InMemory", "Custom"] },
          "encrypted": { "type": "boolean" },
          "redis_version": { "type": "string", "nullable": true },
          "storage_path": { "type": "string", "nullable": true },
          "size_on_disk": { "type": "integer", "nullable": true },
          "key_count": { "type": "integer", "nullable": true },
          "created_at": { "type": "integer" },
          "last_access": { "type": "integer", "nullable": true }
        }
      }
    }
  }
}
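For orientation, a call against this API over HTTP JSON-RPC could look like the sketch below (assuming the server from the spec is listening on localhost:8080; the exact transport depends on how the RPC server is started):

```bash
# List database indices
curl -s http://localhost:8080 \
  -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"herodb_listDatabases","params":[]}'
# → {"jsonrpc":"2.0","id":1,"result":[0]}

# Retrieve this OpenRPC document via the discovery method
curl -s http://localhost:8080 \
  -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":2,"method":"rpc.discover","params":[]}'
```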
@@ -1,2 +0,0 @@
/// The OpenRPC specification for the HeroDB JSON-RPC API
pub const OPENRPC_SPEC: &str = include_str!("../docs/openrpc.json");
@@ -1,8 +0,0 @@
#[derive(Clone)]
pub struct DBOption {
    pub dir: String,
    pub port: u16,
    pub debug: bool,
    pub encrypt: bool,
    pub encryption_key: Option<String>, // Master encryption key
}
@@ -1,300 +0,0 @@
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::Mutex;
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
use serde::{Deserialize, Serialize};
use serde_json::Value;

use crate::server::Server;
use crate::openrpc_spec::OPENRPC_SPEC;

/// Database backend types
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum BackendType {
    Redb,
    // Future: InMemory, Custom(String)
}

/// Database configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DatabaseConfig {
    pub name: Option<String>,
    pub storage_path: Option<String>,
    pub max_size: Option<u64>,
    pub redis_version: Option<String>,
}

/// Database information returned by metadata queries
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DatabaseInfo {
    pub id: u64,
    pub name: Option<String>,
    pub backend: BackendType,
    pub encrypted: bool,
    pub redis_version: Option<String>,
    pub storage_path: Option<String>,
    pub size_on_disk: Option<u64>,
    pub key_count: Option<u64>,
    pub created_at: u64,
    pub last_access: Option<u64>,
}

/// RPC trait for HeroDB management
#[rpc(client, server, namespace = "herodb")]
pub trait Rpc {
    /// Configure an existing database with specific settings
    #[method(name = "configureDatabase")]
    async fn configure_database(
        &self,
        db_index: u64,
        config: DatabaseConfig
    ) -> RpcResult<bool>;

    /// Create/pre-initialize a database at the specified index
    #[method(name = "createDatabase")]
    async fn create_database(&self, db_index: u64) -> RpcResult<bool>;

    /// Set encryption for a specific database index (write-only key)
    #[method(name = "setDatabaseEncryption")]
    async fn set_database_encryption(&self, db_index: u64, encryption_key: String) -> RpcResult<bool>;

    /// List all database indices that exist
    #[method(name = "listDatabases")]
    async fn list_databases(&self) -> RpcResult<Vec<u64>>;

    /// Get detailed information about a specific database
    #[method(name = "getDatabaseInfo")]
    async fn get_database_info(&self, db_index: u64) -> RpcResult<DatabaseInfo>;

    /// Delete a database and its files
    #[method(name = "deleteDatabase")]
    async fn delete_database(&self, db_index: u64) -> RpcResult<bool>;

    /// Get server statistics
    #[method(name = "getServerStats")]
    async fn get_server_stats(&self) -> RpcResult<HashMap<String, serde_json::Value>>;
}

/// RPC Discovery trait for API introspection
#[rpc(client, server, namespace = "rpc", namespace_separator = ".")]
pub trait RpcDiscovery {
    /// Get the OpenRPC specification for API discovery
    #[method(name = "discover")]
    async fn discover(&self) -> RpcResult<Value>;
}

/// RPC Server implementation
#[derive(Clone)]
pub struct RpcServerImpl {
    /// Reference to the main Redis server
    main_server: Arc<Mutex<Server>>,
    /// Base directory for database files
    base_dir: String,
}

impl RpcServerImpl {
    /// Create a new RPC server instance with reference to main server
    pub fn new(main_server: Arc<Mutex<Server>>, base_dir: String) -> Self {
        Self {
            main_server,
            base_dir,
        }
    }
}

#[jsonrpsee::core::async_trait]
impl RpcServer for RpcServerImpl {
    async fn configure_database(
        &self,
        db_index: u64,
        config: DatabaseConfig
    ) -> RpcResult<bool> {
        // For now, configuration is mainly informational
        // In a full implementation, this could set database-specific settings
        println!("Configured database {} with settings: {:?}", db_index, config);
        Ok(true)
    }

    async fn create_database(&self, db_index: u64) -> RpcResult<bool> {
        // Lock the main server to create the database
        let mut server_guard = self.main_server.lock().await;

        // Save the current selected_db to restore it later
        let original_db = server_guard.selected_db;

        // Temporarily set the selected_db to the target database
        server_guard.selected_db = db_index;

        // Call current_storage() which will create the database file if it doesn't exist
        match server_guard.current_storage() {
            Ok(_) => {
                println!("Successfully created database at index {}", db_index);

                // Restore the original selected_db
                server_guard.selected_db = original_db;

                Ok(true)
            }
            Err(e) => {
                // Restore the original selected_db even on error
                server_guard.selected_db = original_db;

                Err(jsonrpsee::types::ErrorObjectOwned::owned(
                    -32000,
                    format!("Failed to create database {}: {}", db_index, e.0),
                    None::<()>
                ))
            }
        }
    }

    async fn set_database_encryption(&self, db_index: u64, _encryption_key: String) -> RpcResult<bool> {
        // Note: Encryption is determined at database creation time based on db_index
        // DB 0-9 are non-encrypted, DB 10+ are encrypted
        // This method is mainly for documentation/configuration purposes
        println!("Note: Database {} encryption is determined by index (10+ = encrypted)", db_index);
        println!("Encryption key provided but not stored (write-only policy)");
        Ok(db_index >= 10) // Return true if this DB would be encrypted
    }

    async fn list_databases(&self) -> RpcResult<Vec<u64>> {
        // Scan the database directory for existing .db files
        let mut db_indices = Vec::new();

        if let Ok(entries) = std::fs::read_dir(&self.base_dir) {
            for entry in entries.flatten() {
                if let Some(file_name) = entry.file_name().to_str() {
                    if let Some(index_str) = file_name.strip_suffix(".db") {
                        if let Ok(index) = index_str.parse::<u64>() {
                            db_indices.push(index);
                        }
                    }
                }
            }
        }

        // Also include database 0 (default) even if file doesn't exist yet
        if !db_indices.contains(&0) {
            db_indices.push(0);
        }

        db_indices.sort();
        Ok(db_indices)
    }

    async fn get_database_info(&self, db_index: u64) -> RpcResult<DatabaseInfo> {
        // Check if database file exists
        let db_path = std::path::PathBuf::from(&self.base_dir).join(format!("{}.db", db_index));
        let file_exists = db_path.exists();

        // If database doesn't exist, return an error
        if !file_exists && db_index != 0 {
            return Err(jsonrpsee::types::ErrorObjectOwned::owned(
                -32000,
                format!("Database {} does not exist", db_index),
                None::<()>
            ));
        }

        // Get file metadata if it exists
        let (size_on_disk, created_at) = if file_exists {
            if let Ok(metadata) = std::fs::metadata(&db_path) {
                let size = Some(metadata.len());
                let created = metadata.created()
                    .unwrap_or(std::time::SystemTime::UNIX_EPOCH)
                    .duration_since(std::time::UNIX_EPOCH)
                    .unwrap_or_default()
                    .as_secs();
                (size, created)
            } else {
                (None, 0)
            }
        } else {
            // Database 0 might not have a file yet
            (None, std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap()
                .as_secs())
        };

        Ok(DatabaseInfo {
            id: db_index,
            name: None, // Could be extended to store names
            backend: BackendType::Redb,
            encrypted: db_index >= 10, // Based on HeroDB's encryption rule
            redis_version: Some("7.0".to_string()),
            storage_path: Some(self.base_dir.clone()),
            size_on_disk,
            key_count: None, // Would need to open DB to count keys
            created_at,
            last_access: None,
        })
    }

    async fn delete_database(&self, db_index: u64) -> RpcResult<bool> {
        // Don't allow deletion of database 0 (default)
        if db_index == 0 {
            return Err(jsonrpsee::types::ErrorObjectOwned::owned(
                -32000,
                "Cannot delete default database (index 0)".to_string(),
                None::<()>
            ));
        }

        let db_path = std::path::PathBuf::from(&self.base_dir).join(format!("{}.db", db_index));

        if db_path.exists() {
            match std::fs::remove_file(&db_path) {
                Ok(_) => {
                    println!("Deleted database file: {}", db_path.display());
                    Ok(true)
                }
                Err(e) => {
                    Err(jsonrpsee::types::ErrorObjectOwned::owned(
                        -32000,
                        format!("Failed to delete database {}: {}", db_index, e),
                        None::<()>
                    ))
                }
            }
        } else {
            Ok(false) // Database didn't exist
        }
    }

    async fn get_server_stats(&self) -> RpcResult<HashMap<String, serde_json::Value>> {
        let mut stats = HashMap::new();

        // Get list of databases
        let databases = self.list_databases().await.unwrap_or_default();

        stats.insert("total_databases".to_string(), serde_json::json!(databases.len()));
        stats.insert("database_indices".to_string(), serde_json::json!(databases));
        stats.insert("uptime".to_string(), serde_json::json!(
            // NOTE: this records the current unix timestamp, not elapsed time since start
            std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap()
                .as_secs()
        ));
        let server_guard = self.main_server.lock().await;
        stats.insert("server_port".to_string(), serde_json::json!(server_guard.option.port));
        stats.insert("data_directory".to_string(), serde_json::json!(self.base_dir));

        Ok(stats)
    }
}

#[jsonrpsee::core::async_trait]
impl RpcDiscoveryServer for RpcServerImpl {
    async fn discover(&self) -> RpcResult<Value> {
        // Parse the OpenRPC spec JSON and return it
        match serde_json::from_str(OPENRPC_SPEC) {
            Ok(spec) => Ok(spec),
            Err(e) => Err(jsonrpsee::types::ErrorObjectOwned::owned(
                -32000,
                format!("Failed to parse OpenRPC specification: {}", e),
                None::<()>
            ))
        }
    }
}
@@ -1,126 +0,0 @@
use std::{
    path::Path,
    time::{SystemTime, UNIX_EPOCH},
};

use redb::{Database, TableDefinition};
use serde::{Deserialize, Serialize};

use crate::crypto::CryptoFactory;
use crate::error::DBError;

// Re-export modules
mod storage_basic;
mod storage_hset;
mod storage_lists;
mod storage_extra;

// Re-export implementations
// Note: These imports are used by the impl blocks in the submodules
// The compiler shows them as unused because they're not directly used in this file
// but they're needed for the Storage struct methods to be available
pub use storage_extra::*;

// Table definitions for different Redis data types
const TYPES_TABLE: TableDefinition<&str, &str> = TableDefinition::new("types");
const STRINGS_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("strings");
const HASHES_TABLE: TableDefinition<(&str, &str), &[u8]> = TableDefinition::new("hashes");
const LISTS_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("lists");
const STREAMS_META_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("streams_meta");
const STREAMS_DATA_TABLE: TableDefinition<(&str, &str), &[u8]> = TableDefinition::new("streams_data");
const ENCRYPTED_TABLE: TableDefinition<&str, u8> = TableDefinition::new("encrypted");
const EXPIRATION_TABLE: TableDefinition<&str, u64> = TableDefinition::new("expiration");

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct StreamEntry {
    pub fields: Vec<(String, String)>,
}

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ListValue {
    pub elements: Vec<String>,
}

#[inline]
pub fn now_in_millis() -> u128 {
    let start = SystemTime::now();
    let duration_since_epoch = start.duration_since(UNIX_EPOCH).unwrap();
    duration_since_epoch.as_millis()
}

pub struct Storage {
    db: Database,
    crypto: Option<CryptoFactory>,
}

impl Storage {
    pub fn new(path: impl AsRef<Path>, should_encrypt: bool, master_key: Option<&str>) -> Result<Self, DBError> {
        let db = Database::create(path)?;

        // Create tables if they don't exist
        let write_txn = db.begin_write()?;
        {
            let _ = write_txn.open_table(TYPES_TABLE)?;
            let _ = write_txn.open_table(STRINGS_TABLE)?;
            let _ = write_txn.open_table(HASHES_TABLE)?;
            let _ = write_txn.open_table(LISTS_TABLE)?;
            let _ = write_txn.open_table(STREAMS_META_TABLE)?;
            let _ = write_txn.open_table(STREAMS_DATA_TABLE)?;
            let _ = write_txn.open_table(ENCRYPTED_TABLE)?;
            let _ = write_txn.open_table(EXPIRATION_TABLE)?;
        }
        write_txn.commit()?;

        // Check if database was previously encrypted
        let read_txn = db.begin_read()?;
        let encrypted_table = read_txn.open_table(ENCRYPTED_TABLE)?;
        let was_encrypted = encrypted_table.get("encrypted")?.map(|v| v.value() == 1).unwrap_or(false);
        drop(read_txn);

        let crypto = if should_encrypt || was_encrypted {
            if let Some(key) = master_key {
                Some(CryptoFactory::new(key.as_bytes()))
            } else {
                return Err(DBError("Encryption requested but no master key provided".to_string()));
            }
        } else {
            None
        };

        // If we're enabling encryption for the first time, mark it
        if should_encrypt && !was_encrypted {
            let write_txn = db.begin_write()?;
            {
                let mut encrypted_table = write_txn.open_table(ENCRYPTED_TABLE)?;
                encrypted_table.insert("encrypted", &1u8)?;
            }
            write_txn.commit()?;
        }

        Ok(Storage {
            db,
            crypto,
        })
    }

    pub fn is_encrypted(&self) -> bool {
        self.crypto.is_some()
    }

    // Helper methods for encryption
    fn encrypt_if_needed(&self, data: &[u8]) -> Result<Vec<u8>, DBError> {
        if let Some(crypto) = &self.crypto {
            Ok(crypto.encrypt(data))
        } else {
            Ok(data.to_vec())
        }
    }

    fn decrypt_if_needed(&self, data: &[u8]) -> Result<Vec<u8>, DBError> {
        if let Some(crypto) = &self.crypto {
            Ok(crypto.decrypt(data)?)
        } else {
            Ok(data.to_vec())
        }
    }
}
1251	specs/backgroundinfo/lance.md	(new file; diff suppressed because it is too large)
6847	specs/backgroundinfo/lancedb.md	(new file; diff suppressed because it is too large)
113	specs/backgroundinfo/sled.md	(new file)
@@ -0,0 +1,113 @@
========================
CODE SNIPPETS
========================
TITLE: Basic Database Operations with sled in Rust

DESCRIPTION: This snippet demonstrates fundamental operations using the `sled` embedded database in Rust. It covers opening a database tree, inserting and retrieving key-value pairs, performing range queries, deleting entries, and executing an atomic compare-and-swap operation. It also shows how to flush changes to disk for durability.

SOURCE: https://github.com/spacejam/sled/blob/main/README.md#_snippet_0

LANGUAGE: Rust
CODE:
```
let tree = sled::open("/tmp/welcome-to-sled")?;

// insert and get, similar to std's BTreeMap
let old_value = tree.insert("key", "value")?;

assert_eq!(
    tree.get(&"key")?,
    Some(sled::IVec::from("value")),
);

// range queries
for kv_result in tree.range("key_1".."key_9") {}

// deletion
let old_value = tree.remove(&"key")?;

// atomic compare and swap
tree.compare_and_swap(
    "key",
    Some("current_value"),
    Some("new_value"),
)?;

// block until all operations are stable on disk
// (flush_async also available to get a Future)
tree.flush()?;
```

----------------------------------------

TITLE: Subscribing to sled Events Asynchronously (Rust)

DESCRIPTION: This snippet demonstrates how to asynchronously subscribe to events on key prefixes in a `sled` database. It initializes a `sled` database, creates a `Subscriber` for all key prefixes, inserts a key-value pair to trigger an event, and then uses `extreme::run` to await and process incoming events. The `Subscriber` struct implements `Future<Output=Option<Event>>`, allowing it to be awaited in an async context.

SOURCE: https://github.com/spacejam/sled/blob/main/README.md#_snippet_1

LANGUAGE: Rust
CODE:
```
let sled = sled::open("my_db").unwrap();

let mut sub = sled.watch_prefix("");

sled.insert(b"a", b"a").unwrap();

extreme::run(async move {
    while let Some(event) = (&mut sub).await {
        println!("got event {:?}", event);
    }
});
```

----------------------------------------

TITLE: Iterating Subscriber Events with Async/Await in Rust

DESCRIPTION: This snippet demonstrates how to asynchronously iterate over events from a `Subscriber` instance in Rust. Since `Subscriber` now implements `Future`, it can be awaited in a loop to process incoming events, enabling efficient prefix watching. The loop continues as long as new events are available.

SOURCE: https://github.com/spacejam/sled/blob/main/CHANGELOG.md#_snippet_0

LANGUAGE: Rust
CODE:
```
while let Some(event) = (&mut subscriber).await {}
```

----------------------------------------

TITLE: Suppressing TSAN Race on Arc::drop in Rust

DESCRIPTION: This suppression addresses a false-positive race detection by ThreadSanitizer in Rust's `Arc::drop` implementation. TSAN fails to correctly reason about the raw atomic `Acquire` fence used after the strong-count atomic subtraction with a `Release` fence in the `Drop` implementation, leading to an erroneous race report.

SOURCE: https://github.com/spacejam/sled/blob/main/tsan_suppressions.txt#_snippet_0

LANGUAGE: TSAN Suppression
CODE:
```
race:Arc*drop
```

----------------------------------------

TITLE: Suppressing TSAN Race on std::thread::local in Rust

DESCRIPTION: This suppression addresses ThreadSanitizer false positives when using Rust's `std::thread::local`. Similar to `lazy_static`, `std::thread::local` utilizes implicit barriers that TSAN fails to recognize, leading to incorrect race condition reports.

SOURCE: https://github.com/spacejam/sled/blob/main/tsan_suppressions.txt#_snippet_2

LANGUAGE: TSAN Suppression
CODE:
```
race:std::thread::local
```

----------------------------------------

TITLE: Suppressing TSAN Race on lazy_static in Rust

DESCRIPTION: This suppression targets ThreadSanitizer false positives related to the `lazy_static` crate in Rust. `lazy_static` relies on implicit memory barriers that TSAN does not correctly detect, causing it to report races where none exist.

SOURCE: https://github.com/spacejam/sled/blob/main/tsan_suppressions.txt#_snippet_1

LANGUAGE: TSAN Suppression
CODE:
```
race:lazy_static
```
735	specs/lance_implementation.md	(new file)
@@ -0,0 +1,735 @@
Looking at your request to ensure embedding happens with an external embedding provider configured in Redis at `config:core:aiembed:url`, I'll modify the implementation to use an external embedding service instead of local models.

## Updated Implementation with External Embedding Provider

### 1. Update Dependencies in `Cargo.toml`

```toml
[dependencies]
# ... existing dependencies ...
lance = "0.18"
lance-index = "0.18"
lance-linalg = "0.18"
arrow = "52"
arrow-array = "52"
arrow-schema = "52"
parquet = "52"
uuid = { version = "1.10", features = ["v4"] }
reqwest = { version = "0.11", features = ["json"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
base64 = "0.22"
image = "0.25"
```

### 2. Create Enhanced Lance Module with External Embedding

Create `src/lance_store.rs`:

```rust
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::RwLock;
use futures::TryStreamExt; // needed for try_collect on the search result stream

use arrow::array::{Float32Array, StringArray, BinaryArray, ArrayRef};
use arrow::datatypes::{DataType, Field, Schema};
use arrow::record_batch::RecordBatch;
use lance::dataset::{Dataset, WriteParams, WriteMode};
use lance::index::vector::VectorIndexParams;
use lance_index::vector::pq::PQBuildParams;
use lance_index::vector::ivf::IvfBuildParams;

use serde::{Deserialize, Serialize};
use crate::error::DBError;
use crate::cmd::Protocol;

#[derive(Debug, Serialize, Deserialize)]
struct EmbeddingRequest {
    texts: Option<Vec<String>>,
    images: Option<Vec<String>>, // base64 encoded
    model: Option<String>,
}

#[derive(Debug, Serialize, Deserialize)]
struct EmbeddingResponse {
    embeddings: Vec<Vec<f32>>,
    model: String,
    usage: Option<HashMap<String, u32>>,
}

pub struct LanceStore {
    datasets: Arc<RwLock<HashMap<String, Arc<Dataset>>>>,
    data_dir: PathBuf,
    http_client: reqwest::Client,
}

impl LanceStore {
    pub async fn new(data_dir: PathBuf) -> Result<Self, DBError> {
        // Create data directory if it doesn't exist
        std::fs::create_dir_all(&data_dir)
            .map_err(|e| DBError(format!("Failed to create Lance data directory: {}", e)))?;

        let http_client = reqwest::Client::builder()
            .timeout(std::time::Duration::from_secs(30))
            .build()
            .map_err(|e| DBError(format!("Failed to create HTTP client: {}", e)))?;

        Ok(Self {
            datasets: Arc::new(RwLock::new(HashMap::new())),
            data_dir,
            http_client,
        })
    }

    /// Get embedding service URL from Redis config
    async fn get_embedding_url(&self, server: &crate::server::Server) -> Result<String, DBError> {
        // Get the embedding URL from Redis config
        let key = "config:core:aiembed:url";

        // Use HGET to retrieve the URL from Redis hash
        let cmd = crate::cmd::Cmd::HGet {
            key: key.to_string(),
            field: "url".to_string(),
        };

        // Execute command to get the config
        let result = cmd.run(server).await?;

        match result {
            Protocol::BulkString(url) => Ok(url),
            Protocol::SimpleString(url) => Ok(url),
            Protocol::Nil => Err(DBError(
                "Embedding service URL not configured. Set it with: HSET config:core:aiembed:url url <YOUR_EMBEDDING_SERVICE_URL>".to_string()
            )),
            _ => Err(DBError("Invalid embedding URL configuration".to_string())),
        }
    }

    /// Call external embedding service
    async fn call_embedding_service(
        &self,
        server: &crate::server::Server,
        texts: Option<Vec<String>>,
        images: Option<Vec<String>>,
    ) -> Result<Vec<Vec<f32>>, DBError> {
        let url = self.get_embedding_url(server).await?;

        let request = EmbeddingRequest {
            texts,
            images,
            model: None, // Let the service use its default
        };

        let response = self.http_client
            .post(&url)
            .json(&request)
            .send()
            .await
            .map_err(|e| DBError(format!("Failed to call embedding service: {}", e)))?;

        if !response.status().is_success() {
            let status = response.status();
            let error_text = response.text().await.unwrap_or_default();
            return Err(DBError(format!(
                "Embedding service returned error {}: {}",
                status, error_text
            )));
        }

        let embedding_response: EmbeddingResponse = response
            .json()
            .await
            .map_err(|e| DBError(format!("Failed to parse embedding response: {}", e)))?;

        Ok(embedding_response.embeddings)
    }

    pub async fn embed_text(
        &self,
        server: &crate::server::Server,
        texts: Vec<String>
    ) -> Result<Vec<Vec<f32>>, DBError> {
        if texts.is_empty() {
            return Ok(Vec::new());
        }

        self.call_embedding_service(server, Some(texts), None).await
    }

    pub async fn embed_image(
        &self,
        server: &crate::server::Server,
        image_bytes: Vec<u8>
    ) -> Result<Vec<f32>, DBError> {
        // Convert image bytes to base64
        let base64_image = base64::encode(&image_bytes);

        let embeddings = self.call_embedding_service(
            server,
            None,
            Some(vec![base64_image])
        ).await?;

        embeddings.into_iter()
            .next()
            .ok_or_else(|| DBError("No embedding returned for image".to_string()))
    }

    pub async fn create_dataset(
        &self,
        name: &str,
        schema: Schema,
    ) -> Result<(), DBError> {
        let dataset_path = self.data_dir.join(format!("{}.lance", name));

        // Create empty dataset with schema
        let write_params = WriteParams {
            mode: WriteMode::Create,
            ..Default::default()
        };

        // Create an empty RecordBatch with the schema
        let empty_batch = RecordBatch::new_empty(Arc::new(schema));
        let batches = vec![empty_batch];

        let dataset = Dataset::write(
            batches,
            dataset_path.to_str().unwrap(),
            Some(write_params)
        ).await
        .map_err(|e| DBError(format!("Failed to create dataset: {}", e)))?;

        let mut datasets = self.datasets.write().await;
        datasets.insert(name.to_string(), Arc::new(dataset));

        Ok(())
    }

    pub async fn write_vectors(
        &self,
        dataset_name: &str,
        vectors: Vec<Vec<f32>>,
        metadata: Option<HashMap<String, Vec<String>>>,
    ) -> Result<usize, DBError> {
        let dataset_path = self.data_dir.join(format!("{}.lance", dataset_name));

        // Open or get cached dataset
        let dataset = self.get_or_open_dataset(dataset_name).await?;

        // Build RecordBatch
        let num_vectors = vectors.len();
        if num_vectors == 0 {
            return Ok(0);
        }

        let dim = vectors.first()
            .ok_or_else(|| DBError("Empty vectors".to_string()))?
            .len();

        // Flatten vectors
        let flat_vectors: Vec<f32> = vectors.into_iter().flatten().collect();
        let vector_array = Float32Array::from(flat_vectors);
        let vector_array = arrow::array::FixedSizeListArray::try_new_from_values(
            vector_array,
            dim as i32
        ).map_err(|e| DBError(format!("Failed to create vector array: {}", e)))?;

        let mut arrays: Vec<ArrayRef> = vec![Arc::new(vector_array)];
        let mut fields = vec![Field::new(
            "vector",
            DataType::FixedSizeList(
                Arc::new(Field::new("item", DataType::Float32, true)),
                dim as i32
            ),
            false
        )];

        // Add metadata columns if provided
        if let Some(metadata) = metadata {
            for (key, values) in metadata {
                if values.len() != num_vectors {
                    return Err(DBError(format!(
                        "Metadata field '{}' has {} values but expected {}",
                        key, values.len(), num_vectors
                    )));
                }
                let array = StringArray::from(values);
                arrays.push(Arc::new(array));
                fields.push(Field::new(&key, DataType::Utf8, true));
            }
        }

        let schema = Arc::new(Schema::new(fields));
        let batch = RecordBatch::try_new(schema, arrays)
            .map_err(|e| DBError(format!("Failed to create RecordBatch: {}", e)))?;

        // Append to dataset
        let write_params = WriteParams {
            mode: WriteMode::Append,
            ..Default::default()
        };

        Dataset::write(
            vec![batch],
            dataset_path.to_str().unwrap(),
            Some(write_params)
        ).await
        .map_err(|e| DBError(format!("Failed to write to dataset: {}", e)))?;

        // Refresh cached dataset
        let mut datasets = self.datasets.write().await;
        datasets.remove(dataset_name);

        Ok(num_vectors)
    }

    pub async fn search_vectors(
        &self,
        dataset_name: &str,
        query_vector: Vec<f32>,
        k: usize,
        nprobes: Option<usize>,
        refine_factor: Option<usize>,
    ) -> Result<Vec<(f32, HashMap<String, String>)>, DBError> {
        let dataset = self.get_or_open_dataset(dataset_name).await?;

        // Build query
        let mut query = dataset.scan();
        query = query.nearest(
            "vector",
            &query_vector,
            k,
        ).map_err(|e| DBError(format!("Failed to build search query: {}", e)))?;

        if let Some(nprobes) = nprobes {
            query = query.nprobes(nprobes);
        }

        if let Some(refine) = refine_factor {
            query = query.refine_factor(refine);
        }

        // Execute search
        let results = query
            .try_into_stream()
            .await
            .map_err(|e| DBError(format!("Failed to execute search: {}", e)))?
            .try_collect::<Vec<_>>()
            .await
            .map_err(|e| DBError(format!("Failed to collect results: {}", e)))?;

        // Process results
        let mut output = Vec::new();
        for batch in results {
            // Get distances
            let distances = batch
                .column_by_name("_distance")
                .ok_or_else(|| DBError("No distance column".to_string()))?
                .as_any()
                .downcast_ref::<Float32Array>()
                .ok_or_else(|| DBError("Invalid distance type".to_string()))?;

            // Get metadata
            for i in 0..batch.num_rows() {
                let distance = distances.value(i);
                let mut metadata = HashMap::new();

                for field in batch.schema().fields() {
                    if field.name() != "vector" && field.name() != "_distance" {
                        if let Some(col) = batch.column_by_name(field.name()) {
                            if let Some(str_array) = col.as_any().downcast_ref::<StringArray>() {
                                if !str_array.is_null(i) {
                                    metadata.insert(
                                        field.name().to_string(),
                                        str_array.value(i).to_string()
                                    );
                                }
                            }
                        }
                    }
                }

                output.push((distance, metadata));
            }
        }

        Ok(output)
    }

    pub async fn store_multimodal(
        &self,
        server: &crate::server::Server,
        dataset_name: &str,
        text: Option<String>,
        image_bytes: Option<Vec<u8>>,
        metadata: HashMap<String, String>,
    ) -> Result<String, DBError> {
        // Generate ID
        let id = uuid::Uuid::new_v4().to_string();

        // Generate embeddings using external service
        let embedding = if let Some(text) = text.as_ref() {
            self.embed_text(server, vec![text.clone()]).await?
                .into_iter()
                .next()
                .ok_or_else(|| DBError("No embedding returned".to_string()))?
        } else if let Some(img) = image_bytes.as_ref() {
            self.embed_image(server, img.clone()).await?
        } else {
            return Err(DBError("No text or image provided".to_string()));
        };

        // Prepare metadata
        let mut full_metadata = metadata;
        full_metadata.insert("id".to_string(), id.clone());
        if let Some(text) = text {
            full_metadata.insert("text".to_string(), text);
        }
        if let Some(img) = image_bytes {
            full_metadata.insert("image_base64".to_string(), base64::encode(img));
        }

        // Convert metadata to column vectors
        let mut metadata_cols = HashMap::new();
        for (key, value) in full_metadata {
            metadata_cols.insert(key, vec![value]);
        }

        // Write to dataset
        self.write_vectors(dataset_name, vec![embedding], Some(metadata_cols)).await?;

        Ok(id)
    }

    pub async fn search_with_text(
        &self,
        server: &crate::server::Server,
        dataset_name: &str,
        query_text: String,
        k: usize,
        nprobes: Option<usize>,
        refine_factor: Option<usize>,
    ) -> Result<Vec<(f32, HashMap<String, String>)>, DBError> {
        // Embed the query text using external service
        let embeddings = self.embed_text(server, vec![query_text]).await?;
        let query_vector = embeddings.into_iter()
            .next()
            .ok_or_else(|| DBError("No embedding returned for query".to_string()))?;

        // Search with the embedding
        self.search_vectors(dataset_name, query_vector, k, nprobes, refine_factor).await
    }

    pub async fn create_index(
        &self,
        dataset_name: &str,
        index_type: &str,
        num_partitions: Option<usize>,
        num_sub_vectors: Option<usize>,
    ) -> Result<(), DBError> {
        let dataset = self.get_or_open_dataset(dataset_name).await?;

        let mut params = VectorIndexParams::default();

        match index_type.to_uppercase().as_str() {
            "IVF_PQ" => {
                params.ivf = IvfBuildParams {
                    num_partitions: num_partitions.unwrap_or(256),
                    ..Default::default()
                };
                params.pq = PQBuildParams {
                    num_sub_vectors: num_sub_vectors.unwrap_or(16),
                    ..Default::default()
                };
            }
            _ => return Err(DBError(format!("Unsupported index type: {}", index_type))),
        }

        dataset.create_index(
            &["vector"],
            lance::index::IndexType::Vector,
            None,
            &params,
            true
        ).await
        .map_err(|e| DBError(format!("Failed to create index: {}", e)))?;

        Ok(())
    }

    async fn get_or_open_dataset(&self, name: &str) -> Result<Arc<Dataset>, DBError> {
        let mut datasets = self.datasets.write().await;

        if let Some(dataset) = datasets.get(name) {
            return Ok(dataset.clone());
        }

        let dataset_path = self.data_dir.join(format!("{}.lance", name));
        if !dataset_path.exists() {
            return Err(DBError(format!("Dataset '{}' does not exist", name)));
        }

        let dataset = Dataset::open(dataset_path.to_str().unwrap())
            .await
            .map_err(|e| DBError(format!("Failed to open dataset: {}", e)))?;

        let dataset = Arc::new(dataset);
        datasets.insert(name.to_string(), dataset.clone());

        Ok(dataset)
    }

    pub async fn list_datasets(&self) -> Result<Vec<String>, DBError> {
        let mut datasets = Vec::new();

        let entries = std::fs::read_dir(&self.data_dir)
            .map_err(|e| DBError(format!("Failed to read data directory: {}", e)))?;

        for entry in entries {
            let entry = entry.map_err(|e| DBError(format!("Failed to read entry: {}", e)))?;
            let path = entry.path();

            if path.is_dir() {
                if let Some(name) = path.file_name() {
                    if let Some(name_str) = name.to_str() {
                        if name_str.ends_with(".lance") {
                            let dataset_name = name_str.trim_end_matches(".lance");
                            datasets.push(dataset_name.to_string());
                        }
                    }
                }
            }
        }

        Ok(datasets)
    }

    pub async fn drop_dataset(&self, name: &str) -> Result<(), DBError> {
        // Remove from cache
        let mut datasets = self.datasets.write().await;
        datasets.remove(name);

        // Delete from disk
        let dataset_path = self.data_dir.join(format!("{}.lance", name));
        if dataset_path.exists() {
            std::fs::remove_dir_all(dataset_path)
                .map_err(|e| DBError(format!("Failed to delete dataset: {}", e)))?;
        }

        Ok(())
    }

    pub async fn get_dataset_info(&self, name: &str) -> Result<HashMap<String, String>, DBError> {
        let dataset = self.get_or_open_dataset(name).await?;

        let mut info = HashMap::new();
        info.insert("name".to_string(), name.to_string());
        info.insert("version".to_string(), dataset.version().to_string());
        info.insert("num_rows".to_string(), dataset.count_rows().await?.to_string());

        // Get schema info
        let schema = dataset.schema();
        let fields: Vec<String> = schema.fields()
            .iter()
            .map(|f| format!("{}:{}", f.name(), f.data_type()))
            .collect();
        info.insert("schema".to_string(), fields.join(", "));

        Ok(info)
    }
}
```

### 3. Update Command Implementations

Update the command implementations to pass the server reference for embedding service access:

```rust
// In cmd.rs, update the lance command implementations

async fn lance_store_cmd(
    server: &Server,
    dataset: &str,
    text: Option<String>,
    image_base64: Option<String>,
    metadata: HashMap<String, String>,
) -> Result<Protocol, DBError> {
    let lance_store = server.lance_store()?;

    // Decode image if provided
    let image_bytes = if let Some(b64) = image_base64 {
        Some(base64::decode(b64).map_err(|e|
            DBError(format!("Invalid base64 image: {}", e)))?)
    } else {
        None
    };

    // Pass server reference for embedding service access
    let id = lance_store.store_multimodal(
        server, // Pass server to access Redis config
        dataset,
        text,
        image_bytes,
        metadata,
    ).await?;

    Ok(Protocol::BulkString(id))
}

async fn lance_embed_text_cmd(
    server: &Server,
    texts: &[String],
) -> Result<Protocol, DBError> {
    let lance_store = server.lance_store()?;

    // Pass server reference for embedding service access
    let embeddings = lance_store.embed_text(server, texts.to_vec()).await?;

    // Return as array of vectors
    let mut output = Vec::new();
    for embedding in embeddings {
        let vector_str = format!("[{}]",
            embedding.iter()
                .map(|f| f.to_string())
                .collect::<Vec<_>>()
                .join(",")
        );
        output.push(Protocol::BulkString(vector_str));
    }

    Ok(Protocol::Array(output))
}

async fn lance_search_text_cmd(
    server: &Server,
    dataset: &str,
    query_text: &str,
    k: usize,
    nprobes: Option<usize>,
    refine_factor: Option<usize>,
) -> Result<Protocol, DBError> {
    let lance_store = server.lance_store()?;

    // Search using text query (will be embedded automatically)
    let results = lance_store.search_with_text(
        server,
        dataset,
        query_text.to_string(),
        k,
        nprobes,
        refine_factor,
    ).await?;

    // Format results
    let mut output = Vec::new();
    for (distance, metadata) in results {
        let metadata_json = serde_json::to_string(&metadata)
            .unwrap_or_else(|_| "{}".to_string());

        output.push(Protocol::Array(vec![
            Protocol::BulkString(distance.to_string()),
            Protocol::BulkString(metadata_json),
        ]));
    }

    Ok(Protocol::Array(output))
}

// Add new command for text-based search
pub enum Cmd {
    // ... existing commands ...
    LanceSearchText {
        dataset: String,
        query_text: String,
        k: usize,
        nprobes: Option<usize>,
        refine_factor: Option<usize>,
    },
}
```

## Usage Examples

### 1. Configure the Embedding Service

First, configure the embedding service URL:

```bash
# Configure the embedding service endpoint
redis-cli> HSET config:core:aiembed:url url "http://localhost:8000/embeddings"
OK

# Or use a cloud service
redis-cli> HSET config:core:aiembed:url url "https://api.openai.com/v1/embeddings"
OK
```

### 2. Use Lance Commands with Automatic External Embedding

```bash
# Create a dataset
redis-cli> LANCE.CREATE products DIM 1536 SCHEMA name:string price:float category:string
OK

# Store text with automatic embedding (calls external service)
redis-cli> LANCE.STORE products TEXT "Wireless noise-canceling headphones with 30-hour battery" name:AirPods price:299.99 category:Electronics
"uuid-123-456"

# Search using text query (automatically embeds the query)
redis-cli> LANCE.SEARCH.TEXT products "best headphones for travel" K 5
1) "0.92"
2) "{\"id\":\"uuid-123\",\"name\":\"AirPods\",\"price\":\"299.99\"}"

# Get embeddings directly
redis-cli> LANCE.EMBED.TEXT "This text will be embedded"
1) "[0.123, 0.456, 0.789, ...]"
```

## External Embedding Service API Specification

The external embedding service should accept POST requests with this format:

```json
// Request
{
  "texts": ["text1", "text2"],      // Optional
  "images": ["base64_img1"],        // Optional
  "model": "text-embedding-ada-002" // Optional
}

// Response
{
  "embeddings": [[0.1, 0.2, ...], [0.3, 0.4, ...]],
  "model": "text-embedding-ada-002",
  "usage": {
    "prompt_tokens": 100,
    "total_tokens": 100
  }
}
```
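A quick way to sanity-check a candidate embedding service against this contract before wiring it into HeroDB (the endpoint URL is the one assumed in the configuration example above):

```bash
curl -s http://localhost:8000/embeddings \
  -H 'Content-Type: application/json' \
  -d '{"texts": ["hello world"]}'
# Expect a JSON body with an "embeddings" array of float arrays, e.g.
# {"embeddings": [[0.123, 0.456, ...]], "model": "..."}
```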
## Error Handling

The implementation includes comprehensive error handling:

1. **Missing Configuration**: Clear error message if the embedding URL is not configured
2. **Service Failures**: Graceful handling of embedding service errors
3. **Timeout Protection**: 30-second timeout for embedding requests (see the sketch below)
4. **Retry Logic**: Could be added for resilience
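A minimal sketch of the timeout wrapper, assuming a `reqwest` HTTP client and the illustrative request/response types above; the 30-second budget matches item 3:

```rust
use tokio::time::{timeout, Duration};

use crate::error::DBError;

async fn call_embedding_service(
    client: &reqwest::Client,
    url: &str,
    request: &EmbeddingRequest,
) -> Result<EmbeddingResponse, DBError> {
    // Bound the whole round trip so a stuck embedding service
    // cannot wedge the command handler indefinitely.
    let response = timeout(Duration::from_secs(30), client.post(url).json(request).send())
        .await
        .map_err(|_| DBError("embedding request timed out after 30s".to_string()))?
        .map_err(|e| DBError(format!("embedding service error: {}", e)))?;

    response
        .json::<EmbeddingResponse>()
        .await
        .map_err(|e| DBError(format!("invalid embedding response: {}", e)))
}
```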
## Benefits of This Approach

1. **Flexibility**: Supports any embedding service with a compatible API
2. **Cost Control**: Use your preferred embedding provider
3. **Scalability**: The embedding service can be scaled independently
4. **Consistency**: All embeddings use the same configured service
5. **Security**: API keys and endpoints are stored securely in Redis

This implementation ensures that all embedding operations go through the external service configured in Redis, providing a clean separation between the vector database functionality and the embedding generation.
TODO EXTRA:

- secret for the embedding service API key
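One possible shape for that secret, reusing the existing config hash; the `api_key` field name is hypothetical:

```bash
# Hypothetical: keep the provider API key next to the endpoint
redis-cli> HSET config:core:aiembed:url api_key "sk-..."
OK
```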
@@ -1,5 +1,4 @@
use crate::{error::DBError, protocol::Protocol, server::Server};
use serde::Serialize;
use tokio::time::{timeout, Duration};
use futures::future::select_all;
@@ -1093,26 +1092,23 @@ async fn dbsize_cmd(server: &Server) -> Result<Protocol, DBError> {
    }
}

#[derive(Serialize)]
struct ServerInfo {
    redis_version: String,
    encrypted: bool,
    selected_db: u64,
}

async fn info_cmd(server: &Server, section: &Option<String>) -> Result<Protocol, DBError> {
    let info = ServerInfo {
        redis_version: "7.0.0".to_string(),
        encrypted: server.current_storage()?.is_encrypted(),
        selected_db: server.selected_db,
    };
    let storage_info = server.current_storage()?.info()?;
    let mut info_map: std::collections::HashMap<String, String> = storage_info.into_iter().collect();

    info_map.insert("redis_version".to_string(), "7.0.0".to_string());
    info_map.insert("selected_db".to_string(), server.selected_db.to_string());
    info_map.insert("backend".to_string(), format!("{:?}", server.option.backend));

    let mut info_string = String::new();
    info_string.push_str(&format!("# Server\n"));
    info_string.push_str(&format!("redis_version:{}\n", info.redis_version));
    info_string.push_str(&format!("encrypted:{}\n", if info.encrypted { 1 } else { 0 }));
    info_string.push_str(&format!("# Keyspace\n"));
    info_string.push_str(&format!("db{}:keys=0,expires=0,avg_ttl=0\n", info.selected_db));
    info_string.push_str("# Server\n");
    info_string.push_str(&format!("redis_version:{}\n", info_map.get("redis_version").unwrap()));
    info_string.push_str(&format!("backend:{}\n", info_map.get("backend").unwrap()));
    info_string.push_str(&format!("encrypted:{}\n", info_map.get("is_encrypted").unwrap()));

    info_string.push_str("# Keyspace\n");
    info_string.push_str(&format!("db{}:keys={},expires=0,avg_ttl=0\n", info_map.get("selected_db").unwrap(), info_map.get("db_size").unwrap()));

    match section {
        Some(s) => {
@@ -23,6 +23,7 @@ impl From<CryptoError> for crate::error::DBError {
}

/// Super-simple factory: new(secret) + encrypt(bytes) + decrypt(bytes)
#[derive(Clone)]
pub struct CryptoFactory {
    key: chacha20poly1305::Key,
}
@@ -1,4 +1,4 @@
pub mod age;
pub mod age; // NEW
pub mod cmd;
pub mod crypto;
pub mod error;
@@ -8,4 +8,5 @@ pub mod rpc;
pub mod rpc_server;
pub mod server;
pub mod storage;
pub mod openrpc_spec;
pub mod storage_trait; // Add this
pub mod storage_sled; // Add this
@@ -1,9 +1,8 @@
use std::sync::Arc;
use tokio::sync::Mutex;
// #![allow(unused_imports)]

use tokio::net::TcpListener;

use herodb::server;
use herodb::server::Server;
use herodb::rpc_server;

use clap::Parser;
@@ -40,6 +39,10 @@ struct Args {
    /// RPC server port (default: 8080)
    #[arg(long, default_value = "8080")]
    rpc_port: u16,

    /// Use the sled backend
    #[arg(long)]
    sled: bool,
}

#[tokio::main]
@@ -61,20 +64,30 @@ async fn main() {
        debug: args.debug,
        encryption_key: args.encryption_key,
        encrypt: args.encrypt,
        backend: if args.sled {
            herodb::options::BackendType::Sled
        } else {
            herodb::options::BackendType::Redb
        },
    };

    let backend = option.backend.clone();

    // new server
    let server = Arc::new(Mutex::new(server::Server::new(option).await));
    let mut server = server::Server::new(option).await;

    // Initialize the default database storage
    let _ = server.current_storage();

    // Add a small delay to ensure the port is ready
    tokio::time::sleep(std::time::Duration::from_millis(100)).await;

    // Start RPC server if enabled
    let _rpc_handle = if args.enable_rpc {
    let rpc_handle = if args.enable_rpc {
        let rpc_addr = format!("127.0.0.1:{}", args.rpc_port).parse().unwrap();
        let base_dir = args.dir.clone();

        match rpc_server::start_rpc_server(rpc_addr, Arc::clone(&server), base_dir).await {
        match rpc_server::start_rpc_server(rpc_addr, base_dir, backend).await {
            Ok(handle) => {
                println!("RPC management server started on port {}", args.rpc_port);
                Some(handle)
@@ -95,9 +108,9 @@ async fn main() {
            Ok((stream, _)) => {
                println!("accepted new connection");

                let sc = Arc::clone(&server);
                let mut sc = server.clone();
                tokio::spawn(async move {
                    if let Err(e) = Server::handle(sc, stream).await {
                    if let Err(e) = sc.handle(stream).await {
                        println!("error: {:?}, will close the connection. Bye", e);
                    }
                });
15
src/options.rs
Normal file
@@ -0,0 +1,15 @@
#[derive(Debug, Clone)]
pub enum BackendType {
    Redb,
    Sled,
}

#[derive(Debug, Clone)]
pub struct DBOption {
    pub dir: String,
    pub port: u16,
    pub debug: bool,
    pub encrypt: bool,
    pub encryption_key: Option<String>,
    pub backend: BackendType,
}
342
src/rpc.rs
Normal file
@@ -0,0 +1,342 @@
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
use serde::{Deserialize, Serialize};

use crate::server::Server;
use crate::options::DBOption;

/// Database backend types
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum BackendType {
    Redb,
    Sled,
    // Future: InMemory, Custom(String)
}

/// Database configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DatabaseConfig {
    pub name: Option<String>,
    pub storage_path: Option<String>,
    pub max_size: Option<u64>,
    pub redis_version: Option<String>,
}

/// Database information returned by metadata queries
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DatabaseInfo {
    pub id: u64,
    pub name: Option<String>,
    pub backend: BackendType,
    pub encrypted: bool,
    pub redis_version: Option<String>,
    pub storage_path: Option<String>,
    pub size_on_disk: Option<u64>,
    pub key_count: Option<u64>,
    pub created_at: u64,
    pub last_access: Option<u64>,
}

/// RPC trait for HeroDB management
#[rpc(server, client, namespace = "herodb")]
pub trait Rpc {
    /// Create a new database with specified configuration
    #[method(name = "createDatabase")]
    async fn create_database(
        &self,
        backend: BackendType,
        config: DatabaseConfig,
        encryption_key: Option<String>,
    ) -> RpcResult<u64>;

    /// Set encryption for an existing database (write-only key)
    #[method(name = "setEncryption")]
    async fn set_encryption(&self, db_id: u64, encryption_key: String) -> RpcResult<bool>;

    /// List all managed databases
    #[method(name = "listDatabases")]
    async fn list_databases(&self) -> RpcResult<Vec<DatabaseInfo>>;

    /// Get detailed information about a specific database
    #[method(name = "getDatabaseInfo")]
    async fn get_database_info(&self, db_id: u64) -> RpcResult<DatabaseInfo>;

    /// Delete a database
    #[method(name = "deleteDatabase")]
    async fn delete_database(&self, db_id: u64) -> RpcResult<bool>;

    /// Get server statistics
    #[method(name = "getServerStats")]
    async fn get_server_stats(&self) -> RpcResult<HashMap<String, serde_json::Value>>;
}

/// RPC Server implementation
pub struct RpcServerImpl {
    /// Base directory for database files
    base_dir: String,
    /// Managed database servers
    servers: Arc<RwLock<HashMap<u64, Arc<Server>>>>,
    /// Next unencrypted database ID to assign
    next_unencrypted_id: Arc<RwLock<u64>>,
    /// Next encrypted database ID to assign
    next_encrypted_id: Arc<RwLock<u64>>,
    /// Default backend type
    backend: crate::options::BackendType,
}

impl RpcServerImpl {
    /// Create a new RPC server instance
    pub fn new(base_dir: String, backend: crate::options::BackendType) -> Self {
        Self {
            base_dir,
            servers: Arc::new(RwLock::new(HashMap::new())),
            next_unencrypted_id: Arc::new(RwLock::new(0)),
            next_encrypted_id: Arc::new(RwLock::new(10)),
            backend,
        }
    }

    /// Get or create a server instance for the given database ID
    async fn get_or_create_server(&self, db_id: u64) -> Result<Arc<Server>, jsonrpsee::types::ErrorObjectOwned> {
        // Check if server already exists
        {
            let servers = self.servers.read().await;
            if let Some(server) = servers.get(&db_id) {
                return Ok(server.clone());
            }
        }

        // Check if database file exists
        let db_path = std::path::PathBuf::from(&self.base_dir).join(format!("{}.db", db_id));
        if !db_path.exists() {
            return Err(jsonrpsee::types::ErrorObjectOwned::owned(
                -32000,
                format!("Database {} not found", db_id),
                None::<()>
            ));
        }

        // Create server instance with default options
        let db_option = DBOption {
            dir: self.base_dir.clone(),
            port: 0, // Not used for RPC-managed databases
            debug: false,
            encryption_key: None,
            encrypt: false,
            backend: self.backend.clone(),
        };

        let mut server = Server::new(db_option).await;

        // Set the selected database to the db_id for proper file naming
        server.selected_db = db_id;

        // Store the server
        let mut servers = self.servers.write().await;
        servers.insert(db_id, Arc::new(server.clone()));

        Ok(Arc::new(server))
    }

    /// Discover existing database files in the base directory
    async fn discover_databases(&self) -> Vec<u64> {
        let mut db_ids = Vec::new();

        if let Ok(entries) = std::fs::read_dir(&self.base_dir) {
            for entry in entries.flatten() {
                if let Ok(file_name) = entry.file_name().into_string() {
                    // Check if it's a database file (ends with .db)
                    if file_name.ends_with(".db") {
                        // Extract database ID from filename (e.g., "11.db" -> 11)
                        if let Some(id_str) = file_name.strip_suffix(".db") {
                            if let Ok(db_id) = id_str.parse::<u64>() {
                                db_ids.push(db_id);
                            }
                        }
                    }
                }
            }
        }

        db_ids
    }

    /// Get the next available database ID
    async fn get_next_db_id(&self, is_encrypted: bool) -> u64 {
        if is_encrypted {
            let mut id = self.next_encrypted_id.write().await;
            let current_id = *id;
            *id += 1;
            current_id
        } else {
            let mut id = self.next_unencrypted_id.write().await;
            let current_id = *id;
            *id += 1;
            current_id
        }
    }
}

#[jsonrpsee::core::async_trait]
impl RpcServer for RpcServerImpl {
    async fn create_database(
        &self,
        backend: BackendType,
        config: DatabaseConfig,
        encryption_key: Option<String>,
    ) -> RpcResult<u64> {
        let db_id = self.get_next_db_id(encryption_key.is_some()).await;

        // Handle both Redb and Sled backends
        match backend {
            BackendType::Redb | BackendType::Sled => {
                // Create database directory
                let db_dir = if let Some(path) = &config.storage_path {
                    std::path::PathBuf::from(path)
                } else {
                    std::path::PathBuf::from(&self.base_dir).join(format!("rpc_db_{}", db_id))
                };

                // Ensure directory exists
                std::fs::create_dir_all(&db_dir)
                    .map_err(|e| jsonrpsee::types::ErrorObjectOwned::owned(
                        -32000,
                        format!("Failed to create directory: {}", e),
                        None::<()>
                    ))?;

                // Create DB options
                let encrypt = encryption_key.is_some();
                let option = DBOption {
                    dir: db_dir.to_string_lossy().to_string(),
                    port: 0, // Not used for RPC-managed databases
                    debug: false,
                    encryption_key,
                    encrypt,
                    backend: match backend {
                        BackendType::Redb => crate::options::BackendType::Redb,
                        BackendType::Sled => crate::options::BackendType::Sled,
                    },
                };

                // Create server instance
                let mut server = Server::new(option).await;

                // Set the selected database to the db_id for proper file naming
                server.selected_db = db_id;

                // Initialize the storage to create the database file
                let _ = server.current_storage();

                // Store the server
                let mut servers = self.servers.write().await;
                servers.insert(db_id, Arc::new(server));

                Ok(db_id)
            }
        }
    }

    async fn set_encryption(&self, db_id: u64, _encryption_key: String) -> RpcResult<bool> {
        // Note: In a real implementation, we'd need to modify the existing database
        // For now, return false as encryption can only be set during creation
        let _servers = self.servers.read().await;
        // TODO: Implement encryption setting for existing databases
        Ok(false)
    }

    async fn list_databases(&self) -> RpcResult<Vec<DatabaseInfo>> {
        let db_ids = self.discover_databases().await;
        let mut result = Vec::new();

        for db_id in db_ids {
            // Try to get or create server for this database
            if let Ok(server) = self.get_or_create_server(db_id).await {
                let backend = match server.option.backend {
                    crate::options::BackendType::Redb => BackendType::Redb,
                    crate::options::BackendType::Sled => BackendType::Sled,
                };

                let info = DatabaseInfo {
                    id: db_id,
                    name: None, // TODO: Store name in server metadata
                    backend,
                    encrypted: server.option.encrypt,
                    redis_version: Some("7.0".to_string()), // Default Redis compatibility
                    storage_path: Some(server.option.dir.clone()),
                    size_on_disk: None, // TODO: Calculate actual size
                    key_count: None, // TODO: Get key count from storage
                    created_at: std::time::SystemTime::now()
                        .duration_since(std::time::UNIX_EPOCH)
                        .unwrap()
                        .as_secs(),
                    last_access: None,
                };
                result.push(info);
            }
        }

        Ok(result)
    }

    async fn get_database_info(&self, db_id: u64) -> RpcResult<DatabaseInfo> {
        let server = self.get_or_create_server(db_id).await?;

        let backend = match server.option.backend {
            crate::options::BackendType::Redb => BackendType::Redb,
            crate::options::BackendType::Sled => BackendType::Sled,
        };

        Ok(DatabaseInfo {
            id: db_id,
            name: None,
            backend,
            encrypted: server.option.encrypt,
            redis_version: Some("7.0".to_string()),
            storage_path: Some(server.option.dir.clone()),
            size_on_disk: None,
            key_count: None,
            created_at: std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap()
                .as_secs(),
            last_access: None,
        })
    }

    async fn delete_database(&self, db_id: u64) -> RpcResult<bool> {
        let mut servers = self.servers.write().await;

        if let Some(_server) = servers.remove(&db_id) {
            // Clean up database files
            let db_path = std::path::PathBuf::from(&self.base_dir).join(format!("{}.db", db_id));
            if db_path.exists() {
                if db_path.is_dir() {
                    std::fs::remove_dir_all(&db_path).ok();
                } else {
                    std::fs::remove_file(&db_path).ok();
                }
            }
            Ok(true)
        } else {
            Ok(false)
        }
    }

    async fn get_server_stats(&self) -> RpcResult<HashMap<String, serde_json::Value>> {
        let db_ids = self.discover_databases().await;
        let mut stats = HashMap::new();

        stats.insert("total_databases".to_string(), serde_json::json!(db_ids.len()));
        stats.insert("uptime".to_string(), serde_json::json!(
            std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap()
                .as_secs()
        ));

        Ok(stats)
    }
}
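For reference, jsonrpsee exposes these methods under the `herodb` namespace, so a create call over HTTP looks roughly like this (port and payload are illustrative):

```json
// POST http://127.0.0.1:8080
{
  "jsonrpc": "2.0",
  "id": 1,
  "method": "herodb_createDatabase",
  "params": [
    "Redb",
    { "name": "mydb", "storage_path": null, "max_size": null, "redis_version": null },
    null
  ]
}
```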
@@ -2,24 +2,16 @@ use std::net::SocketAddr;
use jsonrpsee::server::{ServerBuilder, ServerHandle};
use jsonrpsee::RpcModule;

use std::sync::Arc;
use tokio::sync::Mutex;
use crate::rpc::{RpcServer, RpcDiscoveryServer, RpcServerImpl};
use crate::server::Server;
use crate::rpc::{RpcServer, RpcServerImpl};

/// Start the RPC server on the specified address
pub async fn start_rpc_server(
    addr: SocketAddr,
    main_server: Arc<Mutex<Server>>,
    base_dir: String
) -> Result<ServerHandle, Box<dyn std::error::Error + Send + Sync>> {
pub async fn start_rpc_server(addr: SocketAddr, base_dir: String, backend: crate::options::BackendType) -> Result<ServerHandle, Box<dyn std::error::Error + Send + Sync>> {
    // Create the RPC server implementation
    let rpc_impl = RpcServerImpl::new(main_server, base_dir);
    let rpc_impl = RpcServerImpl::new(base_dir, backend);

    // Create the RPC module
    let mut module = RpcModule::new(());
    module.merge(RpcServer::into_rpc(rpc_impl.clone()))?;
    module.merge(RpcDiscoveryServer::into_rpc(rpc_impl))?;
    module.merge(RpcServer::into_rpc(rpc_impl))?;

    // Build the server with both HTTP and WebSocket support
    let server = ServerBuilder::default()
@@ -43,15 +35,9 @@ mod tests {
    async fn test_rpc_server_startup() {
        let addr = "127.0.0.1:0".parse().unwrap(); // Use port 0 for auto-assignment
        let base_dir = "/tmp/test_rpc".to_string();
        let backend = crate::options::BackendType::Redb; // Default for test

        let main_server = Arc::new(Mutex::new(crate::server::Server::new(crate::options::DBOption {
            dir: "/tmp".to_string(),
            port: 0,
            debug: false,
            encryption_key: None,
            encrypt: false,
        }).await));
        let handle = start_rpc_server(addr, main_server, base_dir).await.unwrap();
        let handle = start_rpc_server(addr, base_dir, backend).await.unwrap();

        // Give the server a moment to start
        tokio::time::sleep(Duration::from_millis(100)).await;
@@ -12,10 +12,12 @@ use crate::error::DBError;
use crate::options;
use crate::protocol::Protocol;
use crate::storage::Storage;
use crate::storage_sled::SledStorage;
use crate::storage_trait::StorageBackend;

#[derive(Clone)]
pub struct Server {
    pub db_cache: std::sync::Arc<std::sync::RwLock<HashMap<u64, Arc<Storage>>>>,
    pub db_cache: std::sync::Arc<std::sync::RwLock<HashMap<u64, Arc<dyn StorageBackend>>>>,
    pub option: options::DBOption,
    pub client_name: Option<String>,
    pub selected_db: u64, // Changed from usize to u64
@@ -52,7 +54,7 @@ impl Server {
        }
    }

    pub fn current_storage(&self) -> Result<Arc<Storage>, DBError> {
    pub fn current_storage(&self) -> Result<Arc<dyn StorageBackend>, DBError> {
        let mut cache = self.db_cache.write().unwrap();

        if let Some(storage) = cache.get(&self.selected_db) {
@@ -73,11 +75,22 @@ impl Server {

        println!("Creating new db file: {}", db_file_path.display());

        let storage = Arc::new(Storage::new(
            db_file_path,
            self.should_encrypt_db(self.selected_db),
            self.option.encryption_key.as_deref()
        )?);
        let storage: Arc<dyn StorageBackend> = match self.option.backend {
            options::BackendType::Redb => {
                Arc::new(Storage::new(
                    db_file_path,
                    self.should_encrypt_db(self.selected_db),
                    self.option.encryption_key.as_deref()
                )?)
            }
            options::BackendType::Sled => {
                Arc::new(SledStorage::new(
                    db_file_path,
                    self.should_encrypt_db(self.selected_db),
                    self.option.encryption_key.as_deref()
                )?)
            }
        };

        cache.insert(self.selected_db, storage.clone());
        Ok(storage)
@@ -167,7 +180,7 @@ impl Server {
    }

    pub async fn handle(
        server: Arc<Mutex<Server>>,
        &mut self,
        mut stream: tokio::net::TcpStream,
    ) -> Result<(), DBError> {
        // Accumulate incoming bytes to handle partial RESP frames
@@ -205,49 +218,31 @@ impl Server {
            // Advance the accumulator to the unparsed remainder
            acc = remaining.to_string();

            if self.option.debug {
                println!("\x1b[34;1mgot command: {:?}, protocol: {:?}\x1b[0m", cmd, protocol);
            } else {
                println!("got command: {:?}, protocol: {:?}", cmd, protocol);
            }

            // Check if this is a QUIT command before processing
            let is_quit = matches!(cmd, Cmd::Quit);

            // Lock the server only for command execution
            let (res, debug_info) = {
                let mut server_guard = server.lock().await;

                if server_guard.option.debug {
                    println!("\x1b[34;1mgot command: {:?}, protocol: {:?}\x1b[0m", cmd, protocol);
                } else {
                    println!("got command: {:?}, protocol: {:?}", cmd, protocol);
                }

                let res = match cmd.run(&mut server_guard).await {
                    Ok(p) => p,
                    Err(e) => {
                        if server_guard.option.debug {
                            eprintln!("[run error] {:?}", e);
                        }
                        Protocol::err(&format!("ERR {}", e.0))
            let res = match cmd.run(self).await {
                Ok(p) => p,
                Err(e) => {
                    if self.option.debug {
                        eprintln!("[run error] {:?}", e);
                    }
                };

                let debug_info = if server_guard.option.debug {
                    Some((format!("queued cmd {:?}", server_guard.queued_cmd), format!("going to send response {}", res.encode())))
                } else {
                    Some((format!("queued cmd {:?}", server_guard.queued_cmd), format!("going to send response {}", res.encode())))
                };

                (res, debug_info)
                    Protocol::err(&format!("ERR {}", e.0))
                }
            };

            // Print debug info outside the lock
            if let Some((queued_info, response_info)) = debug_info {
                if let Some((_, response)) = response_info.split_once("going to send response ") {
                    if queued_info.contains("\x1b[34;1m") {
                        println!("\x1b[34;1m{}\x1b[0m", queued_info);
                        println!("\x1b[32;1mgoing to send response {}\x1b[0m", response);
                    } else {
                        println!("{}", queued_info);
                        println!("going to send response {}", response);
                    }
                }
            if self.option.debug {
                println!("\x1b[34;1mqueued cmd {:?}\x1b[0m", self.queued_cmd);
                println!("\x1b[32;1mgoing to send response {}\x1b[0m", res.encode());
            } else {
                print!("queued cmd {:?}", self.queued_cmd);
                println!("going to send response {}", res.encode());
            }

            _ = stream.write(res.encode().as_bytes()).await?;
287
src/storage/mod.rs
Normal file
@@ -0,0 +1,287 @@
use std::{
    path::Path,
    sync::Arc,
    time::{SystemTime, UNIX_EPOCH},
};

use redb::{Database, TableDefinition};
use serde::{Deserialize, Serialize};

use crate::crypto::CryptoFactory;
use crate::error::DBError;

// Re-export modules
mod storage_basic;
mod storage_hset;
mod storage_lists;
mod storage_extra;

// Re-export implementations
// Note: These imports are used by the impl blocks in the submodules
// The compiler shows them as unused because they're not directly used in this file
// but they're needed for the Storage struct methods to be available
pub use storage_extra::*;

// Table definitions for different Redis data types
const TYPES_TABLE: TableDefinition<&str, &str> = TableDefinition::new("types");
const STRINGS_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("strings");
const HASHES_TABLE: TableDefinition<(&str, &str), &[u8]> = TableDefinition::new("hashes");
const LISTS_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("lists");
const STREAMS_META_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("streams_meta");
const STREAMS_DATA_TABLE: TableDefinition<(&str, &str), &[u8]> = TableDefinition::new("streams_data");
const ENCRYPTED_TABLE: TableDefinition<&str, u8> = TableDefinition::new("encrypted");
const EXPIRATION_TABLE: TableDefinition<&str, u64> = TableDefinition::new("expiration");

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct StreamEntry {
    pub fields: Vec<(String, String)>,
}

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ListValue {
    pub elements: Vec<String>,
}

#[inline]
pub fn now_in_millis() -> u128 {
    let start = SystemTime::now();
    let duration_since_epoch = start.duration_since(UNIX_EPOCH).unwrap();
    duration_since_epoch.as_millis()
}

pub struct Storage {
    db: Database,
    crypto: Option<CryptoFactory>,
}

impl Storage {
    pub fn new(path: impl AsRef<Path>, should_encrypt: bool, master_key: Option<&str>) -> Result<Self, DBError> {
        let db = Database::create(path)?;

        // Create tables if they don't exist
        let write_txn = db.begin_write()?;
        {
            let _ = write_txn.open_table(TYPES_TABLE)?;
            let _ = write_txn.open_table(STRINGS_TABLE)?;
            let _ = write_txn.open_table(HASHES_TABLE)?;
            let _ = write_txn.open_table(LISTS_TABLE)?;
            let _ = write_txn.open_table(STREAMS_META_TABLE)?;
            let _ = write_txn.open_table(STREAMS_DATA_TABLE)?;
            let _ = write_txn.open_table(ENCRYPTED_TABLE)?;
            let _ = write_txn.open_table(EXPIRATION_TABLE)?;
        }
        write_txn.commit()?;

        // Check if database was previously encrypted
        let read_txn = db.begin_read()?;
        let encrypted_table = read_txn.open_table(ENCRYPTED_TABLE)?;
        let was_encrypted = encrypted_table.get("encrypted")?.map(|v| v.value() == 1).unwrap_or(false);
        drop(read_txn);

        let crypto = if should_encrypt || was_encrypted {
            if let Some(key) = master_key {
                Some(CryptoFactory::new(key.as_bytes()))
            } else {
                return Err(DBError("Encryption requested but no master key provided".to_string()));
            }
        } else {
            None
        };

        // If we're enabling encryption for the first time, mark it
        if should_encrypt && !was_encrypted {
            let write_txn = db.begin_write()?;
            {
                let mut encrypted_table = write_txn.open_table(ENCRYPTED_TABLE)?;
                encrypted_table.insert("encrypted", &1u8)?;
            }
            write_txn.commit()?;
        }

        Ok(Storage {
            db,
            crypto,
        })
    }

    pub fn is_encrypted(&self) -> bool {
        self.crypto.is_some()
    }

    // Helper methods for encryption
    fn encrypt_if_needed(&self, data: &[u8]) -> Result<Vec<u8>, DBError> {
        if let Some(crypto) = &self.crypto {
            Ok(crypto.encrypt(data))
        } else {
            Ok(data.to_vec())
        }
    }

    fn decrypt_if_needed(&self, data: &[u8]) -> Result<Vec<u8>, DBError> {
        if let Some(crypto) = &self.crypto {
            Ok(crypto.decrypt(data)?)
        } else {
            Ok(data.to_vec())
        }
    }
}

use crate::storage_trait::StorageBackend;

impl StorageBackend for Storage {
    fn get(&self, key: &str) -> Result<Option<String>, DBError> {
        self.get(key)
    }

    fn set(&self, key: String, value: String) -> Result<(), DBError> {
        self.set(key, value)
    }

    fn setx(&self, key: String, value: String, expire_ms: u128) -> Result<(), DBError> {
        self.setx(key, value, expire_ms)
    }

    fn del(&self, key: String) -> Result<(), DBError> {
        self.del(key)
    }

    fn exists(&self, key: &str) -> Result<bool, DBError> {
        self.exists(key)
    }

    fn keys(&self, pattern: &str) -> Result<Vec<String>, DBError> {
        self.keys(pattern)
    }

    fn dbsize(&self) -> Result<i64, DBError> {
        self.dbsize()
    }

    fn flushdb(&self) -> Result<(), DBError> {
        self.flushdb()
    }

    fn get_key_type(&self, key: &str) -> Result<Option<String>, DBError> {
        self.get_key_type(key)
    }

    fn scan(&self, cursor: u64, pattern: Option<&str>, count: Option<u64>) -> Result<(u64, Vec<(String, String)>), DBError> {
        self.scan(cursor, pattern, count)
    }

    fn hscan(&self, key: &str, cursor: u64, pattern: Option<&str>, count: Option<u64>) -> Result<(u64, Vec<(String, String)>), DBError> {
        self.hscan(key, cursor, pattern, count)
    }

    fn hset(&self, key: &str, pairs: Vec<(String, String)>) -> Result<i64, DBError> {
        self.hset(key, pairs)
    }

    fn hget(&self, key: &str, field: &str) -> Result<Option<String>, DBError> {
        self.hget(key, field)
    }

    fn hgetall(&self, key: &str) -> Result<Vec<(String, String)>, DBError> {
        self.hgetall(key)
    }

    fn hdel(&self, key: &str, fields: Vec<String>) -> Result<i64, DBError> {
        self.hdel(key, fields)
    }

    fn hexists(&self, key: &str, field: &str) -> Result<bool, DBError> {
        self.hexists(key, field)
    }

    fn hkeys(&self, key: &str) -> Result<Vec<String>, DBError> {
        self.hkeys(key)
    }

    fn hvals(&self, key: &str) -> Result<Vec<String>, DBError> {
        self.hvals(key)
    }

    fn hlen(&self, key: &str) -> Result<i64, DBError> {
        self.hlen(key)
    }

    fn hmget(&self, key: &str, fields: Vec<String>) -> Result<Vec<Option<String>>, DBError> {
        self.hmget(key, fields)
    }

    fn hsetnx(&self, key: &str, field: &str, value: &str) -> Result<bool, DBError> {
        self.hsetnx(key, field, value)
    }

    fn lpush(&self, key: &str, elements: Vec<String>) -> Result<i64, DBError> {
        self.lpush(key, elements)
    }

    fn rpush(&self, key: &str, elements: Vec<String>) -> Result<i64, DBError> {
        self.rpush(key, elements)
    }

    fn lpop(&self, key: &str, count: u64) -> Result<Vec<String>, DBError> {
        self.lpop(key, count)
    }

    fn rpop(&self, key: &str, count: u64) -> Result<Vec<String>, DBError> {
        self.rpop(key, count)
    }

    fn llen(&self, key: &str) -> Result<i64, DBError> {
        self.llen(key)
    }

    fn lindex(&self, key: &str, index: i64) -> Result<Option<String>, DBError> {
        self.lindex(key, index)
    }

    fn lrange(&self, key: &str, start: i64, stop: i64) -> Result<Vec<String>, DBError> {
        self.lrange(key, start, stop)
    }

    fn ltrim(&self, key: &str, start: i64, stop: i64) -> Result<(), DBError> {
        self.ltrim(key, start, stop)
    }

    fn lrem(&self, key: &str, count: i64, element: &str) -> Result<i64, DBError> {
        self.lrem(key, count, element)
    }

    fn ttl(&self, key: &str) -> Result<i64, DBError> {
        self.ttl(key)
    }

    fn expire_seconds(&self, key: &str, secs: u64) -> Result<bool, DBError> {
        self.expire_seconds(key, secs)
    }

    fn pexpire_millis(&self, key: &str, ms: u128) -> Result<bool, DBError> {
        self.pexpire_millis(key, ms)
    }

    fn persist(&self, key: &str) -> Result<bool, DBError> {
        self.persist(key)
    }

    fn expire_at_seconds(&self, key: &str, ts_secs: i64) -> Result<bool, DBError> {
        self.expire_at_seconds(key, ts_secs)
    }

    fn pexpire_at_millis(&self, key: &str, ts_ms: i64) -> Result<bool, DBError> {
        self.pexpire_at_millis(key, ts_ms)
    }

    fn is_encrypted(&self) -> bool {
        self.is_encrypted()
    }

    fn info(&self) -> Result<Vec<(String, String)>, DBError> {
        self.info()
    }

    fn clone_arc(&self) -> Arc<dyn StorageBackend> {
        unimplemented!("Storage cloning not yet implemented for redb backend")
    }
}
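The forwarding impl above exists so callers can hold either engine behind one trait object. A minimal illustration, assuming the two constructors from this change set:

```rust
use std::sync::Arc;

use crate::error::DBError;
use crate::storage::Storage;
use crate::storage_sled::SledStorage;
use crate::storage_trait::StorageBackend;

fn open_backend(use_sled: bool, path: &str) -> Result<Arc<dyn StorageBackend>, DBError> {
    // Command handlers only see `dyn StorageBackend`, so they never
    // need to know which engine is underneath.
    let storage: Arc<dyn StorageBackend> = if use_sled {
        Arc::new(SledStorage::new(path, false, None)?)
    } else {
        Arc::new(Storage::new(path, false, None)?)
    };
    Ok(storage)
}
```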
@@ -208,6 +208,14 @@ impl Storage {
        write_txn.commit()?;
        Ok(applied)
    }

    pub fn info(&self) -> Result<Vec<(String, String)>, DBError> {
        let dbsize = self.dbsize()?;
        Ok(vec![
            ("db_size".to_string(), dbsize.to_string()),
            ("is_encrypted".to_string(), self.is_encrypted().to_string()),
        ])
    }
}

// Utility function for glob pattern matching
845
src/storage_sled/mod.rs
Normal file
@@ -0,0 +1,845 @@
|
||||
// src/storage_sled/mod.rs
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::collections::HashMap;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use crate::error::DBError;
|
||||
use crate::storage_trait::StorageBackend;
|
||||
use crate::crypto::CryptoFactory;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
enum ValueType {
|
||||
String(String),
|
||||
Hash(HashMap<String, String>),
|
||||
List(Vec<String>),
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
struct StorageValue {
|
||||
value: ValueType,
|
||||
expires_at: Option<u128>, // milliseconds since epoch
|
||||
}
|
||||
|
||||
pub struct SledStorage {
|
||||
db: sled::Db,
|
||||
types: sled::Tree,
|
||||
crypto: Option<CryptoFactory>,
|
||||
}
|
||||
|
||||
impl SledStorage {
|
||||
pub fn new(path: impl AsRef<Path>, should_encrypt: bool, master_key: Option<&str>) -> Result<Self, DBError> {
|
||||
let db = sled::open(path).map_err(|e| DBError(format!("Failed to open sled: {}", e)))?;
|
||||
let types = db.open_tree("types").map_err(|e| DBError(format!("Failed to open types tree: {}", e)))?;
|
||||
|
||||
// Check if database was previously encrypted
|
||||
let encrypted_tree = db.open_tree("encrypted").map_err(|e| DBError(e.to_string()))?;
|
||||
let was_encrypted = encrypted_tree.get("encrypted")
|
||||
.map_err(|e| DBError(e.to_string()))?
|
||||
.map(|v| v[0] == 1)
|
||||
.unwrap_or(false);
|
||||
|
||||
let crypto = if should_encrypt || was_encrypted {
|
||||
if let Some(key) = master_key {
|
||||
Some(CryptoFactory::new(key.as_bytes()))
|
||||
} else {
|
||||
return Err(DBError("Encryption requested but no master key provided".to_string()));
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Mark database as encrypted if enabling encryption
|
||||
if should_encrypt && !was_encrypted {
|
||||
encrypted_tree.insert("encrypted", &[1u8])
|
||||
.map_err(|e| DBError(e.to_string()))?;
|
||||
encrypted_tree.flush().map_err(|e| DBError(e.to_string()))?;
|
||||
}
|
||||
|
||||
Ok(SledStorage { db, types, crypto })
|
||||
}
|
||||
|
||||
fn now_millis() -> u128 {
|
||||
SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_millis()
|
||||
}
|
||||
|
||||
fn encrypt_if_needed(&self, data: &[u8]) -> Result<Vec<u8>, DBError> {
|
||||
if let Some(crypto) = &self.crypto {
|
||||
Ok(crypto.encrypt(data))
|
||||
} else {
|
||||
Ok(data.to_vec())
|
||||
}
|
||||
}
|
||||
|
||||
fn decrypt_if_needed(&self, data: &[u8]) -> Result<Vec<u8>, DBError> {
|
||||
if let Some(crypto) = &self.crypto {
|
||||
Ok(crypto.decrypt(data)?)
|
||||
} else {
|
||||
Ok(data.to_vec())
|
||||
}
|
||||
}
|
||||
|
||||
fn get_storage_value(&self, key: &str) -> Result<Option<StorageValue>, DBError> {
|
||||
match self.db.get(key).map_err(|e| DBError(e.to_string()))? {
|
||||
Some(encrypted_data) => {
|
||||
let decrypted = self.decrypt_if_needed(&encrypted_data)?;
|
||||
let storage_val: StorageValue = bincode::deserialize(&decrypted)
|
||||
.map_err(|e| DBError(format!("Deserialization error: {}", e)))?;
|
||||
|
||||
// Check expiration
|
||||
if let Some(expires_at) = storage_val.expires_at {
|
||||
if Self::now_millis() > expires_at {
|
||||
// Expired, remove it
|
||||
self.db.remove(key).map_err(|e| DBError(e.to_string()))?;
|
||||
self.types.remove(key).map_err(|e| DBError(e.to_string()))?;
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Some(storage_val))
|
||||
}
|
||||
None => Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
fn set_storage_value(&self, key: &str, storage_val: StorageValue) -> Result<(), DBError> {
|
||||
let data = bincode::serialize(&storage_val)
|
||||
.map_err(|e| DBError(format!("Serialization error: {}", e)))?;
|
||||
let encrypted = self.encrypt_if_needed(&data)?;
|
||||
self.db.insert(key, encrypted).map_err(|e| DBError(e.to_string()))?;
|
||||
|
||||
// Store type info (unencrypted for efficiency)
|
||||
let type_str = match &storage_val.value {
|
||||
ValueType::String(_) => "string",
|
||||
ValueType::Hash(_) => "hash",
|
||||
ValueType::List(_) => "list",
|
||||
};
|
||||
self.types.insert(key, type_str.as_bytes()).map_err(|e| DBError(e.to_string()))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn glob_match(pattern: &str, text: &str) -> bool {
|
||||
if pattern == "*" {
|
||||
return true;
|
||||
}
|
||||
|
||||
let pattern_chars: Vec<char> = pattern.chars().collect();
|
||||
let text_chars: Vec<char> = text.chars().collect();
|
||||
|
||||
fn match_recursive(pattern: &[char], text: &[char], pi: usize, ti: usize) -> bool {
|
||||
if pi >= pattern.len() {
|
||||
return ti >= text.len();
|
||||
}
|
||||
|
||||
if ti >= text.len() {
|
||||
return pattern[pi..].iter().all(|&c| c == '*');
|
||||
}
|
||||
|
||||
match pattern[pi] {
|
||||
'*' => {
|
||||
for i in ti..=text.len() {
|
||||
if match_recursive(pattern, text, pi + 1, i) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
'?' => match_recursive(pattern, text, pi + 1, ti + 1),
|
||||
c => {
|
||||
if text[ti] == c {
|
||||
match_recursive(pattern, text, pi + 1, ti + 1)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
match_recursive(&pattern_chars, &text_chars, 0, 0)
|
||||
}
|
||||
}
|
||||
|
||||
impl StorageBackend for SledStorage {
|
||||
fn get(&self, key: &str) -> Result<Option<String>, DBError> {
|
||||
match self.get_storage_value(key)? {
|
||||
Some(storage_val) => match storage_val.value {
|
||||
ValueType::String(s) => Ok(Some(s)),
|
||||
_ => Ok(None)
|
||||
}
|
||||
None => Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
fn set(&self, key: String, value: String) -> Result<(), DBError> {
|
||||
let storage_val = StorageValue {
|
||||
value: ValueType::String(value),
|
||||
expires_at: None,
|
||||
};
|
||||
self.set_storage_value(&key, storage_val)?;
|
||||
self.db.flush().map_err(|e| DBError(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn setx(&self, key: String, value: String, expire_ms: u128) -> Result<(), DBError> {
|
||||
let storage_val = StorageValue {
|
||||
value: ValueType::String(value),
|
||||
expires_at: Some(Self::now_millis() + expire_ms),
|
||||
};
|
||||
self.set_storage_value(&key, storage_val)?;
|
||||
self.db.flush().map_err(|e| DBError(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn del(&self, key: String) -> Result<(), DBError> {
|
||||
self.db.remove(&key).map_err(|e| DBError(e.to_string()))?;
|
||||
self.types.remove(&key).map_err(|e| DBError(e.to_string()))?;
|
||||
self.db.flush().map_err(|e| DBError(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn exists(&self, key: &str) -> Result<bool, DBError> {
|
||||
// Check with expiration
|
||||
Ok(self.get_storage_value(key)?.is_some())
|
||||
}
|
||||
|
||||
fn keys(&self, pattern: &str) -> Result<Vec<String>, DBError> {
|
||||
let mut keys = Vec::new();
|
||||
for item in self.types.iter() {
|
||||
let (key_bytes, _) = item.map_err(|e| DBError(e.to_string()))?;
|
||||
let key = String::from_utf8_lossy(&key_bytes).to_string();
|
||||
|
||||
// Check if key is expired
|
||||
if self.get_storage_value(&key)?.is_some() {
|
||||
if Self::glob_match(pattern, &key) {
|
||||
keys.push(key);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(keys)
|
||||
}
|
||||
|
||||
fn scan(&self, cursor: u64, pattern: Option<&str>, count: Option<u64>) -> Result<(u64, Vec<(String, String)>), DBError> {
|
||||
let mut result = Vec::new();
|
||||
let mut current_cursor = 0u64;
|
||||
let limit = count.unwrap_or(10) as usize;
|
||||
|
||||
for item in self.types.iter() {
|
||||
if current_cursor >= cursor {
|
||||
let (key_bytes, type_bytes) = item.map_err(|e| DBError(e.to_string()))?;
|
||||
let key = String::from_utf8_lossy(&key_bytes).to_string();
|
||||
|
||||
// Check pattern match
|
||||
let matches = if let Some(pat) = pattern {
|
||||
Self::glob_match(pat, &key)
|
||||
} else {
|
||||
true
|
||||
};
|
||||
|
||||
if matches {
|
||||
// Check if key is expired and get value
|
||||
if let Some(storage_val) = self.get_storage_value(&key)? {
|
||||
let value = match storage_val.value {
|
||||
ValueType::String(s) => s,
|
||||
_ => String::from_utf8_lossy(&type_bytes).to_string(),
|
||||
};
|
||||
result.push((key, value));
|
||||
|
||||
if result.len() >= limit {
|
||||
current_cursor += 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
current_cursor += 1;
|
||||
}
|
||||
|
||||
let next_cursor = if result.len() < limit { 0 } else { current_cursor };
|
||||
Ok((next_cursor, result))
|
||||
}
|
||||
|
||||
fn dbsize(&self) -> Result<i64, DBError> {
|
||||
let mut count = 0i64;
|
||||
for item in self.types.iter() {
|
||||
let (key_bytes, _) = item.map_err(|e| DBError(e.to_string()))?;
|
||||
let key = String::from_utf8_lossy(&key_bytes).to_string();
|
||||
if self.get_storage_value(&key)?.is_some() {
|
||||
count += 1;
|
||||
}
|
||||
}
|
||||
Ok(count)
|
||||
}
|
||||
|
||||
fn flushdb(&self) -> Result<(), DBError> {
|
||||
self.db.clear().map_err(|e| DBError(e.to_string()))?;
|
||||
self.types.clear().map_err(|e| DBError(e.to_string()))?;
|
||||
self.db.flush().map_err(|e| DBError(e.to_string()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_key_type(&self, key: &str) -> Result<Option<String>, DBError> {
|
||||
// First check if key exists (handles expiration)
|
||||
if self.get_storage_value(key)?.is_some() {
|
||||
match self.types.get(key).map_err(|e| DBError(e.to_string()))? {
|
||||
Some(data) => Ok(Some(String::from_utf8_lossy(&data).to_string())),
|
||||
None => Ok(None)
|
||||
}
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
// Hash operations
|
||||
fn hset(&self, key: &str, pairs: Vec<(String, String)>) -> Result<i64, DBError> {
|
||||
let mut storage_val = self.get_storage_value(key)?.unwrap_or(StorageValue {
|
||||
value: ValueType::Hash(HashMap::new()),
|
||||
expires_at: None,
|
||||
});
|
||||
|
||||
let hash = match &mut storage_val.value {
|
||||
ValueType::Hash(h) => h,
|
||||
_ => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
};
|
||||
|
||||
let mut new_fields = 0i64;
|
||||
for (field, value) in pairs {
|
||||
if !hash.contains_key(&field) {
|
||||
new_fields += 1;
|
||||
}
|
||||
hash.insert(field, value);
|
||||
}
|
||||
|
||||
self.set_storage_value(key, storage_val)?;
|
||||
self.db.flush().map_err(|e| DBError(e.to_string()))?;
|
||||
Ok(new_fields)
|
||||
}
|
||||
|
||||
fn hget(&self, key: &str, field: &str) -> Result<Option<String>, DBError> {
|
||||
match self.get_storage_value(key)? {
|
||||
Some(storage_val) => match storage_val.value {
|
||||
ValueType::Hash(h) => Ok(h.get(field).cloned()),
|
||||
_ => Ok(None)
|
||||
}
|
||||
None => Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
fn hgetall(&self, key: &str) -> Result<Vec<(String, String)>, DBError> {
|
||||
match self.get_storage_value(key)? {
|
||||
Some(storage_val) => match storage_val.value {
|
||||
ValueType::Hash(h) => Ok(h.into_iter().collect()),
|
||||
_ => Ok(Vec::new())
|
||||
}
|
||||
None => Ok(Vec::new())
|
||||
}
|
||||
}
|
||||
|
||||
fn hscan(&self, key: &str, cursor: u64, pattern: Option<&str>, count: Option<u64>) -> Result<(u64, Vec<(String, String)>), DBError> {
|
||||
match self.get_storage_value(key)? {
|
||||
Some(storage_val) => match storage_val.value {
|
||||
ValueType::Hash(h) => {
|
||||
let mut result = Vec::new();
|
||||
let mut current_cursor = 0u64;
|
||||
let limit = count.unwrap_or(10) as usize;
|
||||
|
||||
for (field, value) in h.iter() {
|
||||
if current_cursor >= cursor {
|
||||
let matches = if let Some(pat) = pattern {
|
||||
Self::glob_match(pat, field)
|
||||
} else {
|
||||
true
|
||||
};
|
||||
|
||||
if matches {
|
||||
result.push((field.clone(), value.clone()));
|
||||
if result.len() >= limit {
|
||||
current_cursor += 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
current_cursor += 1;
|
||||
}
|
||||
|
||||
let next_cursor = if result.len() < limit { 0 } else { current_cursor };
|
||||
Ok((next_cursor, result))
|
||||
}
|
||||
_ => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string()))
|
||||
}
|
||||
None => Ok((0, Vec::new()))
|
||||
}
|
||||
}
|
||||
|
||||
fn hdel(&self, key: &str, fields: Vec<String>) -> Result<i64, DBError> {
|
||||
let mut storage_val = match self.get_storage_value(key)? {
|
||||
Some(sv) => sv,
|
||||
None => return Ok(0)
|
||||
};
|
||||
|
||||
let hash = match &mut storage_val.value {
|
||||
ValueType::Hash(h) => h,
|
||||
_ => return Ok(0)
|
||||
};
|
||||
|
||||
let mut deleted = 0i64;
|
||||
for field in fields {
|
||||
if hash.remove(&field).is_some() {
|
||||
deleted += 1;
|
||||
}
|
||||
}
|
||||
|
||||
if hash.is_empty() {
|
||||
self.del(key.to_string())?;
|
||||
} else {
|
||||
self.set_storage_value(key, storage_val)?;
|
||||
self.db.flush().map_err(|e| DBError(e.to_string()))?;
|
||||
}
|
||||
|
||||
Ok(deleted)
|
||||
}
|
||||
|
||||
fn hexists(&self, key: &str, field: &str) -> Result<bool, DBError> {
|
||||
match self.get_storage_value(key)? {
|
||||
Some(storage_val) => match storage_val.value {
|
||||
ValueType::Hash(h) => Ok(h.contains_key(field)),
|
||||
_ => Ok(false)
|
||||
}
|
||||
None => Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
fn hkeys(&self, key: &str) -> Result<Vec<String>, DBError> {
|
||||
match self.get_storage_value(key)? {
|
||||
Some(storage_val) => match storage_val.value {
|
||||
ValueType::Hash(h) => Ok(h.keys().cloned().collect()),
|
||||
_ => Ok(Vec::new())
|
||||
}
|
||||
None => Ok(Vec::new())
|
||||
}
|
||||
}
|
||||
|
||||
fn hvals(&self, key: &str) -> Result<Vec<String>, DBError> {
|
||||
match self.get_storage_value(key)? {
|
||||
Some(storage_val) => match storage_val.value {
|
||||
ValueType::Hash(h) => Ok(h.values().cloned().collect()),
|
||||
_ => Ok(Vec::new())
|
||||
}
|
||||
None => Ok(Vec::new())
|
||||
}
|
||||
}
|
||||
|
||||
fn hlen(&self, key: &str) -> Result<i64, DBError> {
|
||||
match self.get_storage_value(key)? {
|
||||
Some(storage_val) => match storage_val.value {
|
||||
ValueType::Hash(h) => Ok(h.len() as i64),
|
||||
_ => Ok(0)
|
||||
}
|
||||
None => Ok(0)
|
||||
}
|
||||
}
|
||||
|
||||
fn hmget(&self, key: &str, fields: Vec<String>) -> Result<Vec<Option<String>>, DBError> {
|
||||
match self.get_storage_value(key)? {
|
||||
Some(storage_val) => match storage_val.value {
|
||||
ValueType::Hash(h) => {
|
||||
Ok(fields.into_iter().map(|f| h.get(&f).cloned()).collect())
|
||||
}
|
||||
_ => Ok(fields.into_iter().map(|_| None).collect())
|
||||
}
|
||||
None => Ok(fields.into_iter().map(|_| None).collect())
|
||||
}
|
||||
}
|
||||
|
||||
fn hsetnx(&self, key: &str, field: &str, value: &str) -> Result<bool, DBError> {
|
||||
let mut storage_val = self.get_storage_value(key)?.unwrap_or(StorageValue {
|
||||
value: ValueType::Hash(HashMap::new()),
|
||||
expires_at: None,
|
||||
});
|
||||
|
||||
let hash = match &mut storage_val.value {
|
||||
ValueType::Hash(h) => h,
|
||||
_ => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
};
|
||||
|
||||
if hash.contains_key(field) {
|
||||
Ok(false)
|
||||
} else {
|
||||
hash.insert(field.to_string(), value.to_string());
|
||||
self.set_storage_value(key, storage_val)?;
|
||||
self.db.flush().map_err(|e| DBError(e.to_string()))?;
|
||||
Ok(true)
|
||||
}
|
||||
}
|
||||
|
||||
// List operations
|
||||
fn lpush(&self, key: &str, elements: Vec<String>) -> Result<i64, DBError> {
|
||||
let mut storage_val = self.get_storage_value(key)?.unwrap_or(StorageValue {
|
||||
value: ValueType::List(Vec::new()),
|
||||
expires_at: None,
|
||||
});
|
||||
|
||||
let list = match &mut storage_val.value {
|
||||
ValueType::List(l) => l,
|
||||
_ => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
};
|
||||
|
||||
for element in elements.into_iter().rev() {
|
||||
list.insert(0, element);
|
||||
}
|
||||
|
||||
let len = list.len() as i64;
|
||||
self.set_storage_value(key, storage_val)?;
|
||||
self.db.flush().map_err(|e| DBError(e.to_string()))?;
|
||||
Ok(len)
|
||||
}
|
||||
|
||||
fn rpush(&self, key: &str, elements: Vec<String>) -> Result<i64, DBError> {
|
||||
let mut storage_val = self.get_storage_value(key)?.unwrap_or(StorageValue {
|
||||
value: ValueType::List(Vec::new()),
|
||||
expires_at: None,
|
||||
});
|
||||
|
||||
let list = match &mut storage_val.value {
|
||||
ValueType::List(l) => l,
|
||||
_ => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
};
|
||||
|
||||
list.extend(elements);
|
||||
let len = list.len() as i64;
|
||||
self.set_storage_value(key, storage_val)?;
|
||||
self.db.flush().map_err(|e| DBError(e.to_string()))?;
|
||||
Ok(len)
|
||||
}
|
||||
|
||||
fn lpop(&self, key: &str, count: u64) -> Result<Vec<String>, DBError> {
|
||||
let mut storage_val = match self.get_storage_value(key)? {
|
||||
Some(sv) => sv,
|
||||
None => return Ok(Vec::new())
|
||||
};
|
||||
|
||||
let list = match &mut storage_val.value {
|
||||
ValueType::List(l) => l,
|
||||
_ => return Ok(Vec::new())
|
||||
};
|
||||
|
||||
let mut result = Vec::new();
|
||||
for _ in 0..count.min(list.len() as u64) {
|
||||
if let Some(elem) = list.first() {
|
||||
result.push(elem.clone());
|
||||
list.remove(0);
|
||||
}
|
||||
}
|
||||
|
||||
if list.is_empty() {
|
||||
self.del(key.to_string())?;
|
||||
} else {
|
||||
self.set_storage_value(key, storage_val)?;
|
||||
self.db.flush().map_err(|e| DBError(e.to_string()))?;
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn rpop(&self, key: &str, count: u64) -> Result<Vec<String>, DBError> {
|
||||
let mut storage_val = match self.get_storage_value(key)? {
|
||||
Some(sv) => sv,
|
||||
None => return Ok(Vec::new())
|
||||
};
|
||||
|
||||
let list = match &mut storage_val.value {
|
||||
ValueType::List(l) => l,
|
||||
_ => return Ok(Vec::new())
|
||||
};
|
||||
|
||||
let mut result = Vec::new();
|
||||
for _ in 0..count.min(list.len() as u64) {
|
||||
if let Some(elem) = list.pop() {
|
||||
result.push(elem);
|
||||
}
|
||||
}
|
||||
|
||||
if list.is_empty() {
|
||||
self.del(key.to_string())?;
|
||||
} else {
|
||||
self.set_storage_value(key, storage_val)?;
|
||||
self.db.flush().map_err(|e| DBError(e.to_string()))?;
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn llen(&self, key: &str) -> Result<i64, DBError> {
|
||||
match self.get_storage_value(key)? {
|
||||
Some(storage_val) => match storage_val.value {
|
||||
ValueType::List(l) => Ok(l.len() as i64),
|
||||
_ => Ok(0)
|
||||
}
|
||||
None => Ok(0)
|
||||
}
|
||||
}
|
||||
|
||||
fn lindex(&self, key: &str, index: i64) -> Result<Option<String>, DBError> {
|
||||
match self.get_storage_value(key)? {
|
||||
Some(storage_val) => match storage_val.value {
|
||||
ValueType::List(list) => {
|
||||
let actual_index = if index < 0 {
|
||||
list.len() as i64 + index
|
||||
} else {
|
||||
index
|
||||
};
|
||||
|
||||
if actual_index >= 0 && (actual_index as usize) < list.len() {
|
||||
Ok(Some(list[actual_index as usize].clone()))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
_ => Ok(None)
|
||||
}
|
||||
None => Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
fn lrange(&self, key: &str, start: i64, stop: i64) -> Result<Vec<String>, DBError> {
|
||||
match self.get_storage_value(key)? {
|
||||
Some(storage_val) => match storage_val.value {
|
||||
ValueType::List(list) => {
|
||||
if list.is_empty() {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
|
||||
let len = list.len() as i64;
|
||||
let start_idx = if start < 0 {
|
||||
std::cmp::max(0, len + start)
|
||||
} else {
|
||||
std::cmp::min(start, len)
|
||||
};
|
||||
let stop_idx = if stop < 0 {
|
||||
std::cmp::max(-1, len + stop)
|
||||
} else {
|
||||
std::cmp::min(stop, len - 1)
|
||||
};
|
||||
|
||||
if start_idx > stop_idx || start_idx >= len {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
|
||||
let start_usize = start_idx as usize;
|
||||
let stop_usize = (stop_idx + 1) as usize;
|
||||
|
||||
Ok(list[start_usize..std::cmp::min(stop_usize, list.len())].to_vec())
|
||||
}
|
||||
_ => Ok(Vec::new())
|
||||
}
|
||||
None => Ok(Vec::new())
|
||||
}
|
||||
}
|
||||
|
||||
fn ltrim(&self, key: &str, start: i64, stop: i64) -> Result<(), DBError> {
|
||||
let mut storage_val = match self.get_storage_value(key)? {
|
||||
Some(sv) => sv,
|
||||
None => return Ok(())
|
||||
};
|
||||
|
||||
let list = match &mut storage_val.value {
|
||||
ValueType::List(l) => l,
|
||||
_ => return Ok(())
|
||||
};
|
||||
|
||||
if list.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let len = list.len() as i64;
|
||||
let start_idx = if start < 0 {
|
||||
std::cmp::max(0, len + start)
|
||||
} else {
|
||||
std::cmp::min(start, len)
|
||||
};
|
||||
let stop_idx = if stop < 0 {
|
||||
std::cmp::max(-1, len + stop)
|
||||
} else {
|
||||
std::cmp::min(stop, len - 1)
|
||||
};
|
||||
|
||||
if start_idx > stop_idx || start_idx >= len {
|
||||
self.del(key.to_string())?;
|
||||
} else {
|
||||
let start_usize = start_idx as usize;
|
||||
let stop_usize = (stop_idx + 1) as usize;
|
||||
*list = list[start_usize..std::cmp::min(stop_usize, list.len())].to_vec();
|
||||
|
||||
if list.is_empty() {
|
||||
self.del(key.to_string())?;
|
||||
} else {
|
||||
self.set_storage_value(key, storage_val)?;
|
||||
self.db.flush().map_err(|e| DBError(e.to_string()))?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}

    fn lrem(&self, key: &str, count: i64, element: &str) -> Result<i64, DBError> {
        let mut storage_val = match self.get_storage_value(key)? {
            Some(sv) => sv,
            None => return Ok(0),
        };

        let list = match &mut storage_val.value {
            ValueType::List(l) => l,
            _ => return Ok(0),
        };

        let mut removed = 0i64;

        if count == 0 {
            // Remove all occurrences
            let original_len = list.len();
            list.retain(|x| x != element);
            removed = (original_len - list.len()) as i64;
        } else if count > 0 {
            // Remove first `count` occurrences, scanning head to tail
            let mut to_remove = count as usize;
            list.retain(|x| {
                if x == element && to_remove > 0 {
                    to_remove -= 1;
                    removed += 1;
                    false
                } else {
                    true
                }
            });
        } else {
            // Remove last |count| occurrences, scanning tail to head
            let mut to_remove = (-count) as usize;
            for i in (0..list.len()).rev() {
                if list[i] == element && to_remove > 0 {
                    list.remove(i);
                    to_remove -= 1;
                    removed += 1;
                }
            }
        }

        if list.is_empty() {
            self.del(key.to_string())?;
        } else {
            self.set_storage_value(key, storage_val)?;
            self.db.flush().map_err(|e| DBError(e.to_string()))?;
        }

        Ok(removed)
    }
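The three branches implement the Redis LREM count convention: positive count removes matches head-to-tail, negative count tail-to-head, and zero removes every match. Expected outcomes, written as a hypothetical test against the trait (assuming a fresh backend in `storage`):

fn lrem_demo(storage: &dyn StorageBackend) -> Result<(), DBError> {
    storage.rpush("k", ["a", "b", "a", "c", "a"].map(String::from).to_vec())?;
    assert_eq!(storage.lrem("k", 2, "a")?, 2);  // head-first: list is now [b, c, a]
    assert_eq!(storage.lrem("k", -1, "a")?, 1); // tail-first: list is now [b, c]
    assert_eq!(storage.lrem("k", 0, "x")?, 0);  // no matches, nothing removed
    Ok(())
}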

    // Expiration
    fn ttl(&self, key: &str) -> Result<i64, DBError> {
        match self.get_storage_value(key)? {
            Some(storage_val) => {
                if let Some(expires_at) = storage_val.expires_at {
                    let now = Self::now_millis();
                    if now >= expires_at {
                        Ok(-2) // Key has expired
                    } else {
                        Ok(((expires_at - now) / 1000) as i64) // remaining TTL in whole seconds
                    }
                } else {
                    Ok(-1) // Key exists but has no expiration
                }
            }
            None => Ok(-2), // Key does not exist
        }
    }
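The return values follow the Redis TTL convention: -2 for a missing or already expired key, -1 for a key with no expiry, otherwise the remaining time rounded down to whole seconds. A hypothetical check against the trait:

fn ttl_demo(storage: &dyn StorageBackend) -> Result<(), DBError> {
    storage.set("k".to_string(), "v".to_string())?;
    assert_eq!(storage.ttl("k")?, -1);       // exists, no expiry
    storage.expire_seconds("k", 10)?;
    assert!(storage.ttl("k")? <= 10);        // counts down in whole seconds
    assert_eq!(storage.ttl("missing")?, -2); // absent key
    Ok(())
}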

    fn expire_seconds(&self, key: &str, secs: u64) -> Result<bool, DBError> {
        let mut storage_val = match self.get_storage_value(key)? {
            Some(sv) => sv,
            None => return Ok(false),
        };

        storage_val.expires_at = Some(Self::now_millis() + (secs as u128) * 1000);
        self.set_storage_value(key, storage_val)?;
        self.db.flush().map_err(|e| DBError(e.to_string()))?;
        Ok(true)
    }

    fn pexpire_millis(&self, key: &str, ms: u128) -> Result<bool, DBError> {
        let mut storage_val = match self.get_storage_value(key)? {
            Some(sv) => sv,
            None => return Ok(false),
        };

        storage_val.expires_at = Some(Self::now_millis() + ms);
        self.set_storage_value(key, storage_val)?;
        self.db.flush().map_err(|e| DBError(e.to_string()))?;
        Ok(true)
    }

    fn persist(&self, key: &str) -> Result<bool, DBError> {
        let mut storage_val = match self.get_storage_value(key)? {
            Some(sv) => sv,
            None => return Ok(false),
        };

        if storage_val.expires_at.is_some() {
            storage_val.expires_at = None;
            self.set_storage_value(key, storage_val)?;
            self.db.flush().map_err(|e| DBError(e.to_string()))?;
            Ok(true)
        } else {
            Ok(false)
        }
    }

    fn expire_at_seconds(&self, key: &str, ts_secs: i64) -> Result<bool, DBError> {
        let mut storage_val = match self.get_storage_value(key)? {
            Some(sv) => sv,
            None => return Ok(false),
        };

        let expires_at_ms: u128 = if ts_secs <= 0 { 0 } else { (ts_secs as u128) * 1000 };
        storage_val.expires_at = Some(expires_at_ms);
        self.set_storage_value(key, storage_val)?;
        self.db.flush().map_err(|e| DBError(e.to_string()))?;
        Ok(true)
    }
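Clamping a non-positive timestamp to zero marks the key as already expired on its next read, which matches Redis EXPIREAT semantics for timestamps in the past. A hypothetical check:

fn expire_at_demo(storage: &dyn StorageBackend) -> Result<(), DBError> {
    storage.set("k".to_string(), "v".to_string())?;
    storage.expire_at_seconds("k", 0)?; // Unix epoch: long in the past
    assert_eq!(storage.ttl("k")?, -2);  // immediately reported as expired
    Ok(())
}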

    fn pexpire_at_millis(&self, key: &str, ts_ms: i64) -> Result<bool, DBError> {
        let mut storage_val = match self.get_storage_value(key)? {
            Some(sv) => sv,
            None => return Ok(false),
        };

        let expires_at_ms: u128 = if ts_ms <= 0 { 0 } else { ts_ms as u128 };
        storage_val.expires_at = Some(expires_at_ms);
        self.set_storage_value(key, storage_val)?;
        self.db.flush().map_err(|e| DBError(e.to_string()))?;
        Ok(true)
    }

    fn is_encrypted(&self) -> bool {
        self.crypto.is_some()
    }

    fn info(&self) -> Result<Vec<(String, String)>, DBError> {
        let dbsize = self.dbsize()?;
        Ok(vec![
            ("db_size".to_string(), dbsize.to_string()),
            ("is_encrypted".to_string(), self.is_encrypted().to_string()),
        ])
    }

    fn clone_arc(&self) -> Arc<dyn StorageBackend> {
        // Note: this is a shallow clone. sled::Db is internally reference-counted,
        // so the clones below all share the same underlying database.
        Arc::new(SledStorage {
            db: self.db.clone(),
            types: self.types.clone(),
            crypto: self.crypto.clone(),
        })
    }
}
58 src/storage_trait.rs Normal file
@@ -0,0 +1,58 @@
// src/storage_trait.rs
use crate::error::DBError;
use std::sync::Arc;

pub trait StorageBackend: Send + Sync {
    // Basic key operations
    fn get(&self, key: &str) -> Result<Option<String>, DBError>;
    fn set(&self, key: String, value: String) -> Result<(), DBError>;
    fn setx(&self, key: String, value: String, expire_ms: u128) -> Result<(), DBError>;
    fn del(&self, key: String) -> Result<(), DBError>;
    fn exists(&self, key: &str) -> Result<bool, DBError>;
    fn keys(&self, pattern: &str) -> Result<Vec<String>, DBError>;
    fn dbsize(&self) -> Result<i64, DBError>;
    fn flushdb(&self) -> Result<(), DBError>;
    fn get_key_type(&self, key: &str) -> Result<Option<String>, DBError>;

    // Scanning
    fn scan(&self, cursor: u64, pattern: Option<&str>, count: Option<u64>) -> Result<(u64, Vec<(String, String)>), DBError>;
    fn hscan(&self, key: &str, cursor: u64, pattern: Option<&str>, count: Option<u64>) -> Result<(u64, Vec<(String, String)>), DBError>;

    // Hash operations
    fn hset(&self, key: &str, pairs: Vec<(String, String)>) -> Result<i64, DBError>;
    fn hget(&self, key: &str, field: &str) -> Result<Option<String>, DBError>;
    fn hgetall(&self, key: &str) -> Result<Vec<(String, String)>, DBError>;
    fn hdel(&self, key: &str, fields: Vec<String>) -> Result<i64, DBError>;
    fn hexists(&self, key: &str, field: &str) -> Result<bool, DBError>;
    fn hkeys(&self, key: &str) -> Result<Vec<String>, DBError>;
    fn hvals(&self, key: &str) -> Result<Vec<String>, DBError>;
    fn hlen(&self, key: &str) -> Result<i64, DBError>;
    fn hmget(&self, key: &str, fields: Vec<String>) -> Result<Vec<Option<String>>, DBError>;
    fn hsetnx(&self, key: &str, field: &str, value: &str) -> Result<bool, DBError>;

    // List operations
    fn lpush(&self, key: &str, elements: Vec<String>) -> Result<i64, DBError>;
    fn rpush(&self, key: &str, elements: Vec<String>) -> Result<i64, DBError>;
    fn lpop(&self, key: &str, count: u64) -> Result<Vec<String>, DBError>;
    fn rpop(&self, key: &str, count: u64) -> Result<Vec<String>, DBError>;
    fn llen(&self, key: &str) -> Result<i64, DBError>;
    fn lindex(&self, key: &str, index: i64) -> Result<Option<String>, DBError>;
    fn lrange(&self, key: &str, start: i64, stop: i64) -> Result<Vec<String>, DBError>;
    fn ltrim(&self, key: &str, start: i64, stop: i64) -> Result<(), DBError>;
    fn lrem(&self, key: &str, count: i64, element: &str) -> Result<i64, DBError>;

    // Expiration
    fn ttl(&self, key: &str) -> Result<i64, DBError>;
    fn expire_seconds(&self, key: &str, secs: u64) -> Result<bool, DBError>;
    fn pexpire_millis(&self, key: &str, ms: u128) -> Result<bool, DBError>;
    fn persist(&self, key: &str) -> Result<bool, DBError>;
    fn expire_at_seconds(&self, key: &str, ts_secs: i64) -> Result<bool, DBError>;
    fn pexpire_at_millis(&self, key: &str, ts_ms: i64) -> Result<bool, DBError>;

    // Metadata
    fn is_encrypted(&self) -> bool;
    fn info(&self) -> Result<Vec<(String, String)>, DBError>;

    // Clone to Arc for sharing
    fn clone_arc(&self) -> Arc<dyn StorageBackend>;
}
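Every method takes &self and the trait is object-safe, so callers can hold an Arc<dyn StorageBackend> and stay agnostic to whether redb or sled sits underneath; clone_arc exists because Clone itself is not object-safe. A minimal sketch of the intended call-site shape (function and key names are illustrative):

use std::sync::Arc;

fn record_boot(storage: Arc<dyn StorageBackend>) -> Result<(), DBError> {
    // clone_arc hands out another shareable handle to the same backend.
    let handle = storage.clone_arc();
    handle.set("boot_marker".to_string(), "ok".to_string())?;
    println!("keys in db: {}", handle.dbsize()?);
    Ok(())
}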
@@ -1,6 +1,4 @@
use herodb::{server::Server, options::DBOption};
use std::sync::Arc;
use tokio::sync::Mutex;
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;
@@ -29,22 +27,20 @@ async fn debug_hset_simple() {
        debug: false,
        encrypt: false,
        encryption_key: None,
        backend: herodb::options::BackendType::Redb,
    };

    let server = Arc::new(Mutex::new(Server::new(option).await));
    let mut server = Server::new(option).await;

    // Start server in background
    tokio::spawn(async move {
        let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
            .await
            .unwrap();

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let server_clone = Arc::clone(&server);
                tokio::spawn(async move {
                    let _ = Server::handle(server_clone, stream).await;
                });
                let _ = server.handle(stream).await;
            }
        }
    });
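This hunk is the template for the rest of the patch: every test that previously wrapped the server in Arc<Mutex<...>> and dispatched through the associated function Server::handle now owns the server directly and calls an inherent handle method. Condensed, the change is (signatures inferred from this diff):

// Before: shared handle, each connection dispatched through the Mutex.
let server = Arc::new(Mutex::new(Server::new(option).await));
let _ = Server::handle(Arc::clone(&server), stream).await;

// After: the test owns the server; handle borrows it mutably per connection.
let mut server = Server::new(option).await;
let _ = server.handle(stream).await;

Within these tests the accept loop now also processes connections sequentially in a single task, which is sufficient for single-client test flows.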
@@ -3,8 +3,6 @@ use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;
use tokio::time::sleep;
use std::sync::Arc;
use tokio::sync::Mutex;

#[tokio::test]
async fn debug_hset_return_value() {
@@ -20,22 +18,20 @@ async fn debug_hset_return_value() {
        debug: false,
        encrypt: false,
        encryption_key: None,
        backend: herodb::options::BackendType::Redb,
    };

    let server = Arc::new(Mutex::new(Server::new(option).await));
    let mut server = Server::new(option).await;

    // Start server in background
    tokio::spawn(async move {
        let listener = tokio::net::TcpListener::bind("127.0.0.1:16390")
            .await
            .unwrap();

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let server_clone = Arc::clone(&server);
                tokio::spawn(async move {
                    let _ = Server::handle(server_clone, stream).await;
                });
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -1,32 +1,31 @@
use herodb::{server::Server, options::DBOption};
use std::sync::Arc;
use tokio::sync::Mutex;
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;
use tokio::time::sleep;

// Helper function to start a test server
async fn start_test_server(test_name: &str) -> (Arc<Mutex<Server>>, u16) {
async fn start_test_server(test_name: &str) -> (Server, u16) {
    use std::sync::atomic::{AtomicU16, Ordering};
    static PORT_COUNTER: AtomicU16 = AtomicU16::new(16379);

    let port = PORT_COUNTER.fetch_add(1, Ordering::SeqCst);
    let test_dir = format!("/tmp/herodb_test_{}", test_name);

    // Clean up and create test directory
    let _ = std::fs::remove_dir_all(&test_dir);
    std::fs::create_dir_all(&test_dir).unwrap();

    let option = DBOption {
        dir: test_dir,
        port,
        debug: true,
        encrypt: false,
        encryption_key: None,
        backend: herodb::options::BackendType::Redb,
    };

    let server = Arc::new(Mutex::new(Server::new(option).await));
    let server = Server::new(option).await;
    (server, port)
}
@@ -56,7 +55,7 @@ async fn send_command(stream: &mut TcpStream, command: &str) -> String {

#[tokio::test]
async fn test_basic_ping() {
    let (server, port) = start_test_server("ping").await;
    let (mut server, port) = start_test_server("ping").await;

    // Start server in background
    tokio::spawn(async move {
@@ -66,7 +65,7 @@ async fn test_basic_ping() {

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = Server::handle(Arc::clone(&server), stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -80,7 +79,7 @@ async fn test_basic_ping() {

#[tokio::test]
async fn test_string_operations() {
    let (server, port) = start_test_server("string").await;
    let (mut server, port) = start_test_server("string").await;

    // Start server in background
    tokio::spawn(async move {
@@ -90,7 +89,7 @@ async fn test_string_operations() {

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = Server::handle(Arc::clone(&server), stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -122,7 +121,7 @@ async fn test_string_operations() {

#[tokio::test]
async fn test_incr_operations() {
    let (server, port) = start_test_server("incr").await;
    let (mut server, port) = start_test_server("incr").await;

    tokio::spawn(async move {
        let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
@@ -131,7 +130,7 @@ async fn test_incr_operations() {

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = Server::handle(Arc::clone(&server), stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -156,7 +155,7 @@ async fn test_incr_operations() {

#[tokio::test]
async fn test_hash_operations() {
    let (server, port) = start_test_server("hash").await;
    let (mut server, port) = start_test_server("hash").await;

    tokio::spawn(async move {
        let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
@@ -165,7 +164,7 @@ async fn test_hash_operations() {

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = Server::handle(Arc::clone(&server), stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -222,7 +221,7 @@ async fn test_hash_operations() {

#[tokio::test]
async fn test_expiration() {
    let (server, port) = start_test_server("expiration").await;
    let (mut server, port) = start_test_server("expiration").await;

    tokio::spawn(async move {
        let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
@@ -231,7 +230,7 @@ async fn test_expiration() {

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = Server::handle(Arc::clone(&server), stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -270,7 +269,7 @@ async fn test_expiration() {

#[tokio::test]
async fn test_scan_operations() {
    let (server, port) = start_test_server("scan").await;
    let (mut server, port) = start_test_server("scan").await;

    tokio::spawn(async move {
        let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
@@ -279,7 +278,7 @@ async fn test_scan_operations() {

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = Server::handle(Arc::clone(&server), stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -306,7 +305,7 @@ async fn test_scan_operations() {

#[tokio::test]
async fn test_hscan_operations() {
    let (server, port) = start_test_server("hscan").await;
    let (mut server, port) = start_test_server("hscan").await;

    tokio::spawn(async move {
        let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
@@ -315,7 +314,7 @@ async fn test_hscan_operations() {

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = Server::handle(Arc::clone(&server), stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -338,7 +337,7 @@ async fn test_hscan_operations() {

#[tokio::test]
async fn test_transaction_operations() {
    let (server, port) = start_test_server("transaction").await;
    let (mut server, port) = start_test_server("transaction").await;

    tokio::spawn(async move {
        let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
@@ -347,7 +346,7 @@ async fn test_transaction_operations() {

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = Server::handle(Arc::clone(&server), stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -381,7 +380,7 @@ async fn test_transaction_operations() {

#[tokio::test]
async fn test_discard_transaction() {
    let (server, port) = start_test_server("discard").await;
    let (mut server, port) = start_test_server("discard").await;

    tokio::spawn(async move {
        let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
@@ -390,7 +389,7 @@ async fn test_discard_transaction() {

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = Server::handle(Arc::clone(&server), stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -418,7 +417,7 @@ async fn test_discard_transaction() {

#[tokio::test]
async fn test_type_command() {
    let (server, port) = start_test_server("type").await;
    let (mut server, port) = start_test_server("type").await;

    tokio::spawn(async move {
        let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
@@ -427,7 +426,7 @@ async fn test_type_command() {

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = Server::handle(Arc::clone(&server), stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -453,7 +452,7 @@ async fn test_type_command() {

#[tokio::test]
async fn test_config_commands() {
    let (server, port) = start_test_server("config").await;
    let (mut server, port) = start_test_server("config").await;

    tokio::spawn(async move {
        let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
@@ -462,7 +461,7 @@ async fn test_config_commands() {

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = Server::handle(Arc::clone(&server), stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -484,7 +483,7 @@ async fn test_config_commands() {

#[tokio::test]
async fn test_info_command() {
    let (server, port) = start_test_server("info").await;
    let (mut server, port) = start_test_server("info").await;

    tokio::spawn(async move {
        let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
@@ -493,7 +492,7 @@ async fn test_info_command() {

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = Server::handle(Arc::clone(&server), stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -513,7 +512,7 @@ async fn test_info_command() {

#[tokio::test]
async fn test_error_handling() {
    let (server, port) = start_test_server("error").await;
    let (mut server, port) = start_test_server("error").await;

    tokio::spawn(async move {
        let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
@@ -522,7 +521,7 @@ async fn test_error_handling() {

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = Server::handle(Arc::clone(&server), stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -551,7 +550,7 @@ async fn test_error_handling() {

#[tokio::test]
async fn test_list_operations() {
    let (server, port) = start_test_server("list").await;
    let (mut server, port) = start_test_server("list").await;

    tokio::spawn(async move {
        let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
@@ -560,7 +559,7 @@ async fn test_list_operations() {

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = Server::handle(Arc::clone(&server), stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -1,4 +1,9 @@
use herodb::rpc::{BackendType, DatabaseConfig};
use std::net::SocketAddr;
use jsonrpsee::http_client::HttpClientBuilder;
use jsonrpsee::core::client::ClientT;
use serde_json::json;

use herodb::rpc::{RpcClient, BackendType, DatabaseConfig};

#[tokio::test]
async fn test_rpc_server_basic() {
@@ -34,4 +39,24 @@ async fn test_database_config_serialization() {
    assert_eq!(json["name"], "my_db");
    assert_eq!(json["max_size"], 1000000);
    assert_eq!(json["redis_version"], "7.0");
}

#[tokio::test]
async fn test_backend_type_serialization() {
    // Test that both Redb and Sled backends serialize correctly
    let redb_backend = BackendType::Redb;
    let sled_backend = BackendType::Sled;

    let redb_json = serde_json::to_string(&redb_backend).unwrap();
    let sled_json = serde_json::to_string(&sled_backend).unwrap();

    assert_eq!(redb_json, "\"Redb\"");
    assert_eq!(sled_json, "\"Sled\"");

    // Test deserialization
    let redb_deserialized: BackendType = serde_json::from_str(&redb_json).unwrap();
    let sled_deserialized: BackendType = serde_json::from_str(&sled_json).unwrap();

    assert!(matches!(redb_deserialized, BackendType::Redb));
    assert!(matches!(sled_deserialized, BackendType::Sled));
}
@@ -1,34 +1,33 @@
use herodb::{server::Server, options::DBOption};
use std::sync::Arc;
use tokio::sync::Mutex;
use std::time::Duration;
use tokio::time::sleep;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;

// Helper function to start a test server with clean data directory
async fn start_test_server(test_name: &str) -> (Arc<Mutex<Server>>, u16) {
async fn start_test_server(test_name: &str) -> (Server, u16) {
    use std::sync::atomic::{AtomicU16, Ordering};
    static PORT_COUNTER: AtomicU16 = AtomicU16::new(17000);

    // Get a unique port for this test
    let port = PORT_COUNTER.fetch_add(1, Ordering::SeqCst);

    let test_dir = format!("/tmp/herodb_test_{}", test_name);

    // Clean up any existing test data
    let _ = std::fs::remove_dir_all(&test_dir);
    std::fs::create_dir_all(&test_dir).unwrap();

    let option = DBOption {
        dir: test_dir,
        port,
        debug: true,
        encrypt: false,
        encryption_key: None,
        backend: herodb::options::BackendType::Redb,
    };

    let server = Arc::new(Mutex::new(Server::new(option).await));
    let server = Server::new(option).await;
    (server, port)
}

@@ -44,7 +43,7 @@ async fn send_redis_command(port: u16, command: &str) -> String {

#[tokio::test]
async fn test_basic_redis_functionality() {
    let (server, port) = start_test_server("basic").await;
    let (mut server, port) = start_test_server("basic").await;

    // Start server in background with timeout
    let server_handle = tokio::spawn(async move {
@@ -55,7 +54,7 @@ async fn test_basic_redis_functionality() {
        // Accept only a few connections for testing
        for _ in 0..10 {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = Server::handle(Arc::clone(&server), stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -113,7 +112,7 @@ async fn test_basic_redis_functionality() {

#[tokio::test]
async fn test_hash_operations() {
    let (server, port) = start_test_server("hash_ops").await;
    let (mut server, port) = start_test_server("hash_ops").await;

    // Start server in background with timeout
    let server_handle = tokio::spawn(async move {
@@ -124,7 +123,7 @@ async fn test_hash_operations() {
        // Accept only a few connections for testing
        for _ in 0..5 {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = Server::handle(Arc::clone(&server), stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -167,7 +166,7 @@ async fn test_hash_operations() {

#[tokio::test]
async fn test_transaction_operations() {
    let (server, port) = start_test_server("transactions").await;
    let (mut server, port) = start_test_server("transactions").await;

    // Start server in background with timeout
    let server_handle = tokio::spawn(async move {
@@ -178,7 +177,7 @@ async fn test_transaction_operations() {
        // Accept only a few connections for testing
        for _ in 0..5 {
            if let Ok((stream, _)) = listener.accept().await {
                let _ = Server::handle(Arc::clone(&server), stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -1,32 +1,31 @@
use herodb::{server::Server, options::DBOption};
use std::sync::Arc;
use tokio::sync::Mutex;
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;
use tokio::time::sleep;

// Helper function to start a test server with clean data directory
async fn start_test_server(test_name: &str) -> (Arc<Mutex<Server>>, u16) {
async fn start_test_server(test_name: &str) -> (Server, u16) {
    use std::sync::atomic::{AtomicU16, Ordering};
    static PORT_COUNTER: AtomicU16 = AtomicU16::new(16500);

    let port = PORT_COUNTER.fetch_add(1, Ordering::SeqCst);
    let test_dir = format!("/tmp/herodb_simple_test_{}", test_name);

    // Clean up any existing test data
    let _ = std::fs::remove_dir_all(&test_dir);
    std::fs::create_dir_all(&test_dir).unwrap();

    let option = DBOption {
        dir: test_dir,
        port,
        debug: false,
        encrypt: false,
        encryption_key: None,
        backend: herodb::options::BackendType::Redb,
    };

    let server = Arc::new(Mutex::new(Server::new(option).await));
    let server = Server::new(option).await;
    (server, port)
}

@@ -56,7 +55,7 @@ async fn connect_to_server(port: u16) -> TcpStream {

#[tokio::test]
async fn test_basic_ping_simple() {
    let (server, port) = start_test_server("ping").await;
    let (mut server, port) = start_test_server("ping").await;

    // Start server in background
    tokio::spawn(async move {
@@ -66,8 +65,7 @@ async fn test_basic_ping_simple() {

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let server_clone = Arc::clone(&server);
                let _ = Server::handle(server_clone, stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -81,7 +79,7 @@ async fn test_basic_ping_simple() {

#[tokio::test]
async fn test_hset_clean_db() {
    let (server, port) = start_test_server("hset_clean").await;
    let (mut server, port) = start_test_server("hset_clean").await;

    // Start server in background
    tokio::spawn(async move {
@@ -91,8 +89,7 @@ async fn test_hset_clean_db() {

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let server_clone = Arc::clone(&server);
                let _ = Server::handle(server_clone, stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -114,7 +111,7 @@ async fn test_hset_clean_db() {

#[tokio::test]
async fn test_type_command_simple() {
    let (server, port) = start_test_server("type").await;
    let (mut server, port) = start_test_server("type").await;

    // Start server in background
    tokio::spawn(async move {
@@ -124,8 +121,7 @@ async fn test_type_command_simple() {

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let server_clone = Arc::clone(&server);
                let _ = Server::handle(server_clone, stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -154,7 +150,7 @@ async fn test_type_command_simple() {

#[tokio::test]
async fn test_hexists_simple() {
    let (server, port) = start_test_server("hexists").await;
    let (mut server, port) = start_test_server("hexists").await;

    // Start server in background
    tokio::spawn(async move {
@@ -164,8 +160,7 @@ async fn test_hexists_simple() {

        loop {
            if let Ok((stream, _)) = listener.accept().await {
                let server_clone = Arc::clone(&server);
                let _ = Server::handle(server_clone, stream).await;
                let _ = server.handle(stream).await;
            }
        }
    });
@@ -2,14 +2,12 @@ use herodb::{options::DBOption, server::Server};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;
use tokio::time::{sleep, Duration};
use std::sync::Arc;
use tokio::sync::Mutex;

// =========================
// Helpers
// =========================

async fn start_test_server(test_name: &str) -> (Arc<Mutex<Server>>, u16) {
async fn start_test_server(test_name: &str) -> (Server, u16) {
    use std::sync::atomic::{AtomicU16, Ordering};
    static PORT_COUNTER: AtomicU16 = AtomicU16::new(17100);
    let port = PORT_COUNTER.fetch_add(1, Ordering::SeqCst);
@@ -24,13 +22,14 @@ async fn start_test_server(test_name: &str) -> (Arc<Mutex<Server>>, u16) {
        debug: false,
        encrypt: false,
        encryption_key: None,
        backend: herodb::options::BackendType::Redb,
    };

    let server = Arc::new(Mutex::new(Server::new(option).await));
    let server = Server::new(option).await;
    (server, port)
}

async fn spawn_listener(server: Arc<Mutex<Server>>, port: u16) {
async fn spawn_listener(server: Server, port: u16) {
    tokio::spawn(async move {
        let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port))
            .await
@@ -38,9 +37,9 @@ async fn spawn_listener(server: Arc<Mutex<Server>>, port: u16) {
        loop {
            match listener.accept().await {
                Ok((stream, _)) => {
                    let server_clone = Arc::clone(&server);
                    let mut s_clone = server.clone();
                    tokio::spawn(async move {
                        let _ = Server::handle(server_clone, stream).await;
                        let _ = s_clone.handle(stream).await;
                    });
                }
                Err(_e) => break,
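Unlike the sequential accept loops in the tests above, spawn_listener keeps per-connection concurrency by cloning the server for each accepted stream. That only preserves the old shared-state semantics if Server's Clone impl shares the underlying storage; a hypothetical shape consistent with this diff (illustration only, the real fields are not shown in this patch):

use std::sync::Arc;

// Hypothetical layout: clones share storage through the Arc, while any
// per-connection state is copied and stays independent.
#[derive(Clone)]
struct Server {
    storage: Arc<dyn StorageBackend>, // shared by every clone
    selected_db: u64,                 // per-connection, duplicated on clone
}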
@@ -502,11 +501,11 @@ async fn test_07_age_stateless_suite() {
    let mut s = connect(port).await;

    // GENENC -> [recipient, identity]
    let gen_result = send_cmd(&mut s, &["AGE", "GENENC"]).await;
    let genenc = send_cmd(&mut s, &["AGE", "GENENC"]).await;
    assert!(
        gen_result.starts_with("*2\r\n$"),
        genenc.starts_with("*2\r\n$"),
        "AGE GENENC should return array [recipient, identity], got:\n{}",
        gen_result
        genenc
    );

    // Parse simple RESP array of two bulk strings to extract keys
@@ -521,7 +520,7 @@ async fn test_07_age_stateless_suite() {
        let ident = lines.next().unwrap_or("").to_string();
        (recip, ident)
    }
    let (recipient, identity) = parse_two_bulk_array(&gen_result);
    let (recipient, identity) = parse_two_bulk_array(&genenc);
    assert!(
        recipient.starts_with("age1") && identity.starts_with("AGE-SECRET-KEY-1"),
        "Unexpected AGE key formats.\nrecipient: {}\nidentity: {}",