Compare commits: 0511dddd99 ... main (15 commits)

| SHA1 |
|---|
| 9177fa4091 |
| 51ab90c4ad |
| 30a09e6d53 |
| 542996a0ff |
| 63ab39b4b1 |
| ee94d731d7 |
| c7945624bd |
| f8dd304820 |
| 5eab3b080c |
| 246304b9fa |
| 074be114c3 |
| e51af83e45 |
| dbd0635cd9 |
| 0000d82799 |
| 5502ff4bc5 |
Cargo.lock (generated): 1371 lines changed. File diff suppressed because it is too large.
Cargo.toml: 30 lines changed

@@ -1,20 +1,12 @@
[package]
name = "redis-rs"
version = "0.0.1"
authors = ["Pin Fang <fpfangpin@hotmail.com>"]
edition = "2021"
[workspace]
members = [
    "herodb",
    "supervisor",
]
resolver = "2"

[dependencies]
anyhow = "1.0.59"
bytes = "1.3.0"
thiserror = "1.0.32"
tokio = { version = "1.23.0", features = ["full"] }
clap = { version = "4.5.20", features = ["derive"] }
byteorder = "1.4.3"
futures = "0.3"
redb = "2.1.3"
serde = { version = "1.0", features = ["derive"] }
bincode = "1.3.3"

[dev-dependencies]
redis = "0.24"
# You can define shared profiles for all workspace members here
[profile.release]
lto = true
codegen-units = 1
strip = true
README.md: 267 lines removed

@@ -1,267 +0,0 @@
# Build Your Own Redis in Rust

This project builds a toy Redis server clone that can parse the Redis protocol, handle basic Redis commands, parse and initialize its state from an RDB file, and support leader-follower replication, Redis streams (queues), and batched commands in transactions.

You can find all the source code and commit history in [my github repo](https://github.com/fangpin/redis-rs).

## Main features
+ Parse Redis protocol
+ Handle basic Redis commands
+ Parse and initialize Redis from RDB file
+ Leader-follower Replication

## Prerequisites
Install `redis-cli` first (a Redis client implementation used for testing):
```sh
cargo install mini-redis
```
Learn about:
- [Redis protocol](https://redis.io/docs/latest/develop/reference/protocol-spec)
- [RDB file format](https://rdb.fnordig.de/file_format.html)
- [Redis replication](https://redis.io/docs/management/replication/)

## Start the Redis-rs server
```sh
# start as master
cargo run -- --dir /some/db/path --dbfilename dump.rdb
# start as slave
cargo run -- --dir /some/db/path --dbfilename dump.rdb --port 6380 --replicaof "localhost 6379"
```
## Supported Commands
```sh
# basic commands
redis-cli PING
redis-cli ECHO hey
redis-cli SET foo bar
redis-cli SET foo bar px/ex 100
redis-cli GET foo
redis-cli SET foo 2
redis-cli INCR foo
redis-cli INCR missing_key
redis-cli TYPE some_key
redis-cli KEYS "*"

# leader-follower replication related commands
redis-cli CONFIG GET dbfilename
redis-cli INFO replication

# streams related commands
redis-cli XADD stream_key 1526919030474-0 temperature 36 humidity 95
redis-cli XADD stream_key 1526919030474-* temperature 37 humidity 94
redis-cli XADD stream_key "*" foo bar
## read stream
redis-cli XRANGE stream_key 0-2 0-3
## query with + -
redis-cli XRANGE some_key - 1526985054079
## query single stream using xread
redis-cli XREAD streams some_key 1526985054069-0
## query multiple streams using xread
redis-cli XREAD streams stream_key other_stream_key 0-0 0-1
## blocking reads without timeout
redis-cli XREAD block 0 streams some_key 1526985054069-0


# transactions related commands
## start a transaction and exec all queued commands in the transaction
redis-cli
> MULTI
> set foo 1
> incr foo
> exec
## start a transaction, queue commands, then discard the transaction
redis-cli
> MULTI
> set foo 1
> incr foo
> discard

```
## RDB Persistence
Get the Redis-rs server config:
```sh
redis-cli CONFIG GET dbfilename
```

### RDB file format overview
Here are the different sections of the [RDB file](https://rdb.fnordig.de/file_format.html), in order:
+ Header section
+ Metadata section
+ Database section
+ End of file section

#### Header section
The file starts with a magic number:
```sh
52 45 44 49 53 30 30 31 31 // Magic string + version number (ASCII): "REDIS0011".
```

#### Metadata section
Contains zero or more "metadata subsections", each of which specifies a single metadata attribute, e.g.
```sh
FA // Indicates the start of a metadata subsection.
09 72 65 64 69 73 2D 76 65 72 // The name of the metadata attribute (string encoded): "redis-ver".
06 36 2E 30 2E 31 36 // The value of the metadata attribute (string encoded): "6.0.16".
```

#### Database section
Contains zero or more "database subsections", each of which describes a single database, e.g.
```sh
FE // Indicates the start of a database subsection.
00 /* The index of the database (size encoded). Here, the index is 0. */

FB // Indicates that hash table size information follows.
03 /* The size of the hash table that stores the keys and values (size encoded). Here, the total key-value hash table size is 3. */
02 /* The size of the hash table that stores the expires of the keys (size encoded). Here, the number of keys with an expiry is 2. */
```

```sh
00 /* The 1-byte flag that specifies the value's type and encoding. Here, the flag is 0, which means "string." */
06 66 6F 6F 62 61 72 // The name of the key (string encoded). Here, it's "foobar".
06 62 61 7A 71 75 78 // The value (string encoded). Here, it's "bazqux".
```

```sh
FC /* Indicates that this key ("foo") has an expire, and that the expire timestamp is expressed in milliseconds. */
15 72 E7 07 8F 01 00 00 /* The expire timestamp, expressed in Unix time, stored as an 8-byte unsigned long, in little-endian (read right-to-left). Here, the expire timestamp is 1713824559637. */
00 // Value type is string.
03 66 6F 6F // Key name is "foo".
03 62 61 72 // Value is "bar".
```

```sh
FD /* Indicates that this key ("baz") has an expire, and that the expire timestamp is expressed in seconds. */
52 ED 2A 66 /* The expire timestamp, expressed in Unix time, stored as a 4-byte unsigned integer, in little-endian (read right-to-left). Here, the expire timestamp is 1714089298. */
00 // Value type is string.
03 62 61 7A // Key name is "baz".
03 71 75 78 // Value is "qux".
```

In summary, each key-value entry consists of (a parsing sketch follows the list):
- Optional expire information (one of the following):
  - Timestamp in seconds:
    - FD
    - Expire timestamp in seconds (4-byte unsigned integer)
  - Timestamp in milliseconds:
    - FC
    - Expire timestamp in milliseconds (8-byte unsigned long)
- Value type (1-byte flag)
- Key (string encoded)
- Value (encoding depends on value type)
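To make the entry layout concrete, here is a minimal Rust sketch (illustrative only, not code from this repository) that parses one key-value entry, assuming the simple one-byte size encoding (lengths below 64) and string values only:

```rust
/// Expiry attached to a key, if any.
#[derive(Debug)]
enum Expire {
    None,
    Seconds(u32),      // 0xFD marker, 4-byte little-endian
    Milliseconds(u64), // 0xFC marker, 8-byte little-endian
}

/// Parse a string that uses the simple size encoding (first two bits 0b00).
fn parse_string(buf: &[u8]) -> Option<(String, &[u8])> {
    let (len, rest) = (*buf.first()? as usize, &buf[1..]);
    if len >= 64 || rest.len() < len {
        return None; // other size encodings are out of scope for this sketch
    }
    Some((String::from_utf8_lossy(&rest[..len]).into_owned(), &rest[len..]))
}

/// Parse one key-value entry: optional expire, 1-byte type flag, key, value.
fn parse_entry(mut buf: &[u8]) -> Option<(Expire, String, String)> {
    let expire = match *buf.first()? {
        0xFD => {
            let secs = u32::from_le_bytes(buf.get(1..5)?.try_into().ok()?);
            buf = &buf[5..];
            Expire::Seconds(secs)
        }
        0xFC => {
            let ms = u64::from_le_bytes(buf.get(1..9)?.try_into().ok()?);
            buf = &buf[9..];
            Expire::Milliseconds(ms)
        }
        _ => Expire::None,
    };
    let value_type = *buf.first()?; // 0x00 means "string"
    if value_type != 0x00 {
        return None; // only string values are handled in this sketch
    }
    let (key, rest) = parse_string(&buf[1..])?;
    let (value, _rest) = parse_string(rest)?;
    Some((expire, key, value))
}

fn main() {
    // The "FC ... foo -> bar" example from the section above.
    let entry = [
        0xFC, 0x15, 0x72, 0xE7, 0x07, 0x8F, 0x01, 0x00, 0x00, // expire in ms
        0x00, 0x03, b'f', b'o', b'o', 0x03, b'b', b'a', b'r', // type, key, value
    ];
    println!("{:?}", parse_entry(&entry));
}
```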
#### End of file section
```sh
FF /* Indicates that the file is ending, and that the checksum follows. */
89 3b b7 4e f8 0f 77 19 // An 8-byte CRC64 checksum of the entire file.
```

#### Size encoding
```sh
/* If the first two bits are 0b00:
   The size is the remaining 6 bits of the byte.
   In this example, the size is 10: */
0A
00001010

/* If the first two bits are 0b01:
   The size is the next 14 bits
   (remaining 6 bits in the first byte, combined with the next byte),
   in big-endian (read left-to-right).
   In this example, the size is 700: */
42 BC
01000010 10111100

/* If the first two bits are 0b10:
   Ignore the remaining 6 bits of the first byte.
   The size is the next 4 bytes, in big-endian (read left-to-right).
   In this example, the size is 17000: */
80 00 00 42 68
10000000 00000000 00000000 01000010 01101000

/* If the first two bits are 0b11:
   The remaining 6 bits specify a type of string encoding.
   See the string encoding section. */
```
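The same rules expressed as a small, self-contained Rust sketch (a hypothetical helper, not part of this repository):

```rust
/// Result of decoding the first byte(s) of a size-encoded value.
#[derive(Debug, PartialEq)]
enum Size {
    Len(u32),           // a plain length
    StringEncoding(u8), // first two bits were 0b11: the low 6 bits select a string format
}

/// Decode a size; returns the size and how many bytes were consumed.
fn decode_size(buf: &[u8]) -> Option<(Size, usize)> {
    let b0 = *buf.first()?;
    match b0 >> 6 {
        0b00 => Some((Size::Len((b0 & 0x3F) as u32), 1)),
        0b01 => {
            let b1 = *buf.get(1)?;
            Some((Size::Len((((b0 & 0x3F) as u32) << 8) | b1 as u32), 2))
        }
        0b10 => {
            let n = u32::from_be_bytes(buf.get(1..5)?.try_into().ok()?);
            Some((Size::Len(n), 5))
        }
        _ => Some((Size::StringEncoding(b0 & 0x3F), 1)),
    }
}

fn main() {
    // The three worked examples from the section above.
    assert_eq!(decode_size(&[0x0A]), Some((Size::Len(10), 1)));
    assert_eq!(decode_size(&[0x42, 0xBC]), Some((Size::Len(700), 2)));
    assert_eq!(decode_size(&[0x80, 0x00, 0x00, 0x42, 0x68]), Some((Size::Len(17000), 5)));
    println!("size decoding examples check out");
}
```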
#### String encoding
A string-encoded value consists of:
+ The size of the string (size encoded).
+ The string.
```sh
/* The 0x0D size specifies that the string is 13 characters long. The remaining characters spell out "Hello, World!". */
0D 48 65 6C 6C 6F 2C 20 57 6F 72 6C 64 21
```
For sizes that begin with 0b11, the remaining 6 bits indicate a type of string format:
```sh
/* The 0xC0 size indicates the string is an 8-bit integer. In this example, the string is "123". */
C0 7B

/* The 0xC1 size indicates the string is a 16-bit integer. The remaining bytes are in little-endian (read right-to-left). In this example, the string is "12345". */
C1 39 30

/* The 0xC2 size indicates the string is a 32-bit integer. The remaining bytes are in little-endian (read right-to-left). In this example, the string is "1234567". */
C2 87 D6 12 00

/* The 0xC3 size indicates that the string is compressed with the LZF algorithm. You will not encounter LZF-compressed strings in this challenge. */
C3 ...
```
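And a matching Rust sketch for string decoding, again illustrative only and limited to the plain form and the 8/16/32-bit integer forms:

```rust
/// Decode a string-encoded value, covering the plain form and the
/// 8/16/32-bit integer forms (0xC0..0xC2). LZF (0xC3) is left out.
fn decode_string(buf: &[u8]) -> Option<String> {
    let b0 = *buf.first()?;
    match b0 {
        // Plain string: first two bits 0b00, low 6 bits are the length.
        0x00..=0x3F => {
            let len = (b0 & 0x3F) as usize;
            Some(String::from_utf8_lossy(buf.get(1..1 + len)?).into_owned())
        }
        0xC0 => Some((buf.get(1).copied()? as i8).to_string()),
        0xC1 => Some(i16::from_le_bytes(buf.get(1..3)?.try_into().ok()?).to_string()),
        0xC2 => Some(i32::from_le_bytes(buf.get(1..5)?.try_into().ok()?).to_string()),
        _ => None, // 14-bit/32-bit lengths and LZF are omitted from this sketch
    }
}

fn main() {
    let hello = [
        0x0D, 0x48, 0x65, 0x6C, 0x6C, 0x6F, 0x2C, 0x20,
        0x57, 0x6F, 0x72, 0x6C, 0x64, 0x21,
    ];
    assert_eq!(decode_string(&hello).as_deref(), Some("Hello, World!"));
    assert_eq!(decode_string(&[0xC0, 0x7B]).as_deref(), Some("123"));
    assert_eq!(decode_string(&[0xC1, 0x39, 0x30]).as_deref(), Some("12345"));
    assert_eq!(decode_string(&[0xC2, 0x87, 0xD6, 0x12, 0x00]).as_deref(), Some("1234567"));
    println!("string decoding examples check out");
}
```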
## Replication
This project implements Redis [leader-follower replication](https://redis.io/docs/management/replication/):
run multiple Redis servers with one acting as the "master" and the others as "replicas". Changes made to the master are automatically replicated to the replicas.

### Send Handshake (follower -> master)
1. When the follower starts, it sends a PING command to the master as a RESP array.
2. The follower then sends two REPLCONF (replication config) commands to the master to communicate its port and the sync protocol: *REPLCONF listening-port <PORT>* and *REPLCONF capa psync2*. psync2 is the sync protocol supported in this project.
3. The follower sends the *PSYNC* command to the master with a replication id and offset to start the replication process.

### Receive Handshake (master -> follower)
1. Respond with a PONG message to the follower.
2. Respond with an OK message to the follower for both REPLCONF commands.
3. Respond with *+FULLRESYNC <REPL_ID> 0\r\n* to the follower, carrying the replication id and offset (a sketch of the follower side of this exchange follows).
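A minimal Rust sketch of the follower side of this handshake (assumptions: a master on 127.0.0.1:6379 and a follower listening port of 6380; this is not the project's actual replication code):

```rust
use std::io::{Read, Write};
use std::net::TcpStream;

// Encode a command as a RESP array of bulk strings.
fn resp_array(parts: &[&str]) -> String {
    let mut out = format!("*{}\r\n", parts.len());
    for p in parts {
        out.push_str(&format!("${}\r\n{}\r\n", p.len(), p));
    }
    out
}

fn send(stream: &mut TcpStream, parts: &[&str]) -> std::io::Result<String> {
    stream.write_all(resp_array(parts).as_bytes())?;
    let mut buf = [0u8; 4096];
    let n = stream.read(&mut buf)?;
    Ok(String::from_utf8_lossy(&buf[..n]).into_owned())
}

fn main() -> std::io::Result<()> {
    let mut master = TcpStream::connect("127.0.0.1:6379")?;

    // 1. PING, expecting +PONG
    println!("{}", send(&mut master, &["PING"])?);
    // 2. REPLCONF listening-port / REPLCONF capa psync2, expecting +OK for each
    println!("{}", send(&mut master, &["REPLCONF", "listening-port", "6380"])?);
    println!("{}", send(&mut master, &["REPLCONF", "capa", "psync2"])?);
    // 3. PSYNC ? -1, expecting +FULLRESYNC <REPL_ID> 0, followed by the RDB payload
    println!("{}", send(&mut master, &["PSYNC", "?", "-1"])?);
    Ok(())
}
```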
### RDB file transfer
When the follower starts, it sends a *PSYNC ? -1* command to tell the master that it doesn't have any data yet and needs a full resync.

The master then sends a *FULLRESYNC* response to the follower as an acknowledgement.

Finally, the master sends an RDB file representing its current state to the follower. The follower loads the received RDB file into memory, replacing its current state.

### Receive write commands (master -> follower)
The master forwards subsequent write commands to the follower together with offset information.
They are sent over the same TCP connection used for the handshake and the RDB file transfer.
Because the commands are encoded as RESP arrays, just like normal client commands, the follower can reuse the same command-handling logic for commands replicated from the master. The only differences are that the commands come from the master and no response is sent back.
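As an illustration only, with hypothetical `Store` and `Command` types (not this project's code): the follower's apply path is the client path minus the reply, plus offset bookkeeping, the offset being what a replica reports back via REPLCONF ACK:

```rust
// Hypothetical sketch: `Command` and `Store` stand in for the real types.
struct Store;
enum Command { Set(String, String), Ping }

impl Store {
    fn apply(&mut self, cmd: &Command) {
        // Same dispatch logic a normal client command would use.
        match cmd {
            Command::Set(k, v) => println!("replicated SET {k} {v}"),
            Command::Ping => {}
        }
    }
}

/// Apply one replicated command: update state, advance the replication
/// offset by the number of RESP bytes consumed, and send nothing back.
fn apply_replicated(store: &mut Store, cmd: &Command, resp_len: usize, offset: &mut u64) {
    store.apply(cmd);
    *offset += resp_len as u64;
}

fn main() {
    let (mut store, mut offset) = (Store, 0u64);
    let cmd = Command::Set("foo".into(), "bar".into());
    apply_replicated(&mut store, &cmd, 31, &mut offset); // 31 = RESP bytes of this SET
    println!("replication offset now {offset}");
}
```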
## Streams
A stream is identified by a key, and it contains multiple entries.
Each entry consists of one or more key-value pairs, and is assigned a unique ID.
[More about redis streams](https://redis.io/docs/latest/develop/data-types/streams/)
[Radix tree](https://en.wikipedia.org/wiki/Radix_tree)

A stream looks like a list of entries, each holding key-value pairs:
```sh
entries:
  - id: 1526985054069-0 # (ID of the first entry)
    temperature: 36 # (A key value pair in the first entry)
    humidity: 95 # (Another key value pair in the first entry)

  - id: 1526985054079-0 # (ID of the second entry)
    temperature: 37 # (A key value pair in the second entry)
    humidity: 94 # (Another key value pair in the second entry)

  # ... (and so on)
```
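As a rough mental model (hypothetical types; a `BTreeMap` stands in for the radix tree linked above), a stream maps ordered entry IDs to field-value pairs:

```rust
use std::collections::BTreeMap;

/// A stream entry ID: milliseconds timestamp plus a sequence number,
/// e.g. "1526985054069-0".
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct EntryId {
    ms: u64,
    seq: u64,
}

/// One stream: entries ordered by ID, each holding field-value pairs.
#[derive(Default)]
struct Stream {
    entries: BTreeMap<EntryId, Vec<(String, String)>>,
}

impl Stream {
    fn xadd(&mut self, id: EntryId, fields: Vec<(String, String)>) {
        self.entries.insert(id, fields);
    }

    /// XRANGE over an inclusive ID range.
    fn xrange(&self, start: EntryId, end: EntryId) -> Vec<(&EntryId, &Vec<(String, String)>)> {
        self.entries.range(start..=end).collect()
    }
}

fn main() {
    let mut s = Stream::default();
    s.xadd(EntryId { ms: 1526985054069, seq: 0 }, vec![("temperature".into(), "36".into())]);
    s.xadd(EntryId { ms: 1526985054079, seq: 0 }, vec![("temperature".into(), "37".into())]);
    let hits = s.xrange(EntryId { ms: 0, seq: 0 }, EntryId { ms: u64::MAX, seq: u64::MAX });
    println!("{} entries in range", hits.len());
}
```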
Examples of Redis stream use cases include:
- Event sourcing (e.g., tracking user actions, clicks, etc.)
- Sensor monitoring (e.g., readings from devices in the field)
- Notifications (e.g., storing a record of each user's notifications in a separate stream)

## Transaction
When the *MULTI* command is called on a connection, redis simply queues all subsequent commands until *EXEC* or *DISCARD* is called.
*EXEC* executes all queued commands and returns an array of their results, whereas *DISCARD* just clears the queued commands.
Transactions on different client connections are independent.
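A minimal sketch of that per-connection queueing behaviour (hypothetical, simplified types; the real logic lives in the command module shown later in this diff):

```rust
#[derive(Debug)]
enum Cmd { Multi, Exec, Discard, Set(String, String) }

#[derive(Default)]
struct Connection {
    queued: Option<Vec<Cmd>>, // Some(...) while a MULTI block is open
}

impl Connection {
    fn handle(&mut self, cmd: Cmd) -> String {
        match cmd {
            Cmd::Multi => {
                self.queued = Some(Vec::new());
                "+OK".into()
            }
            Cmd::Exec => match self.queued.take() {
                // Run every queued command and return the collected results.
                Some(q) => format!("{:?}", q.into_iter().map(|c| format!("ran {c:?}")).collect::<Vec<_>>()),
                None => "-ERR EXEC without MULTI".into(),
            },
            Cmd::Discard => match self.queued.take() {
                Some(_) => "+OK".into(), // drop the queue without running it
                None => "-ERR DISCARD without MULTI".into(),
            },
            other => match self.queued.as_mut() {
                Some(q) => { q.push(other); "+QUEUED".into() }
                None => format!("ran {other:?} immediately"),
            },
        }
    }
}

fn main() {
    let mut conn = Connection::default();
    println!("{}", conn.handle(Cmd::Multi));
    println!("{}", conn.handle(Cmd::Set("foo".into(), "1".into())));
    println!("{}", conn.handle(Cmd::Exec));
}
```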
herodb/Cargo.toml (new file): 28 lines

@@ -0,0 +1,28 @@
[package]
name = "herodb"
version = "0.0.1"
authors = ["Pin Fang <fpfangpin@hotmail.com>"]
edition = "2021"

[dependencies]
anyhow = "1.0.59"
bytes = "1.3.0"
thiserror = "1.0.32"
tokio = { version = "1.23.0", features = ["full"] }
clap = { version = "4.5.20", features = ["derive"] }
byteorder = "1.4.3"
futures = "0.3"
redb = "2.1.3"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
bincode = "1.3.3"
chacha20poly1305 = "0.10.1"
rand = "0.8"
sha2 = "0.10"
age = "0.10"
secrecy = "0.8"
ed25519-dalek = "2"
base64 = "0.22"

[dev-dependencies]
redis = { version = "0.24", features = ["aio", "tokio-comp"] }
herodb/README.md (new file, empty)
herodb/build.sh (new executable file): 9 lines

@@ -0,0 +1,9 @@
#!/bin/bash

set -euo pipefail
export SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
echo "I am in $SCRIPT_DIR"
cd "$SCRIPT_DIR"

cargo build
herodb/examples/age_bash_demo.sh (new executable file): 71 lines

@@ -0,0 +1,71 @@
#!/bin/bash

# Start the herodb server in the background
echo "Starting herodb server..."
cargo run -p herodb -- --dir /tmp/herodb_age_test --port 6382 --debug --encryption-key "testkey" &
SERVER_PID=$!
sleep 2 # Give the server a moment to start

REDIS_CLI="redis-cli -p 6382"

# Note: an earlier draft of this demo used 'AGE.GENERATE_KEYPAIR alice'-style commands,
# which do not exist in src/cmd.rs. The implemented commands are 'AGE KEYGEN <name>',
# 'AGE ENCRYPTNAME <name> <message>', etc., so the script below uses that syntax.

echo "--- Generating and Storing Encryption Keys ---"
$REDIS_CLI AGE KEYGEN alice
$REDIS_CLI AGE KEYGEN bob

echo "--- Encrypting and Decrypting a Message ---"
MESSAGE="Hello, AGE encryption!"
# The new logic stores keys internally and does not expose a command to get the public key.
# We will encrypt by name.
ALICE_PUBKEY_REPLY=$($REDIS_CLI AGE KEYGEN alice | head -n 2 | tail -n 1)
echo "Alice's Public Key: $ALICE_PUBKEY_REPLY"

echo "Encrypting message: '$MESSAGE' with Alice's identity..."
# AGE ENCRYPT takes an explicit recipient; since we use persistent keys, use ENCRYPTNAME.
CIPHERTEXT=$($REDIS_CLI AGE ENCRYPTNAME alice "$MESSAGE")
echo "Ciphertext: $CIPHERTEXT"

echo "Decrypting ciphertext with Alice's private key..."
DECRYPTED_MESSAGE=$($REDIS_CLI AGE DECRYPTNAME alice "$CIPHERTEXT")
echo "Decrypted Message: $DECRYPTED_MESSAGE"

echo "--- Generating and Storing Signing Keys ---"
$REDIS_CLI AGE SIGNKEYGEN signer1

echo "--- Signing and Verifying a Message ---"
SIGN_MESSAGE="This is a message to be signed."
# Similar to above, there is no GET_SIGN_PUBKEY command; we verify by name.

echo "Signing message: '$SIGN_MESSAGE' with signer1's private key..."
SIGNATURE=$($REDIS_CLI AGE SIGNNAME signer1 "$SIGN_MESSAGE")
echo "Signature: $SIGNATURE"

echo "Verifying signature with signer1's public key..."
VERIFY_RESULT=$($REDIS_CLI AGE VERIFYNAME signer1 "$SIGN_MESSAGE" "$SIGNATURE")
echo "Verification Result: $VERIFY_RESULT"


# There is no DELETE_KEYPAIR command in the implementation
echo "--- Cleaning up keys (manual in herodb) ---"
# We would use DEL for age:key:alice, etc.
$REDIS_CLI DEL age:key:alice
$REDIS_CLI DEL age:privkey:alice
$REDIS_CLI DEL age:key:bob
$REDIS_CLI DEL age:privkey:bob
$REDIS_CLI DEL age:signpub:signer1
$REDIS_CLI DEL age:signpriv:signer1

echo "--- Stopping herodb server ---"
kill $SERVER_PID
wait $SERVER_PID 2>/dev/null
echo "Server stopped."

echo "Bash demo complete."
herodb/examples/age_persist_demo.rs (new file): 83 lines

@@ -0,0 +1,83 @@
use std::io::{Read, Write};
use std::net::TcpStream;

// Minimal RESP helpers
fn arr(parts: &[&str]) -> String {
    let mut out = format!("*{}\r\n", parts.len());
    for p in parts {
        out.push_str(&format!("${}\r\n{}\r\n", p.len(), p));
    }
    out
}
fn read_reply(s: &mut TcpStream) -> String {
    let mut buf = [0u8; 65536];
    let n = s.read(&mut buf).unwrap();
    String::from_utf8_lossy(&buf[..n]).to_string()
}
fn parse_two_bulk(reply: &str) -> Option<(String,String)> {
    let mut lines = reply.split("\r\n");
    if lines.next()? != "*2" { return None; }
    let _n = lines.next()?;
    let a = lines.next()?.to_string();
    let _m = lines.next()?;
    let b = lines.next()?.to_string();
    Some((a,b))
}
fn parse_bulk(reply: &str) -> Option<String> {
    let mut lines = reply.split("\r\n");
    let hdr = lines.next()?;
    if !hdr.starts_with('$') { return None; }
    Some(lines.next()?.to_string())
}
fn parse_simple(reply: &str) -> Option<String> {
    let mut lines = reply.split("\r\n");
    let hdr = lines.next()?;
    if !hdr.starts_with('+') { return None; }
    Some(hdr[1..].to_string())
}

fn main() {
    let mut args = std::env::args().skip(1);
    let host = args.next().unwrap_or_else(|| "127.0.0.1".into());
    let port = args.next().unwrap_or_else(|| "6379".into());
    let addr = format!("{host}:{port}");
    println!("Connecting to {addr}...");
    let mut s = TcpStream::connect(addr).expect("connect");

    // Generate & persist X25519 enc keys under name "alice"
    s.write_all(arr(&["age","keygen","alice"]).as_bytes()).unwrap();
    let (_alice_recip, _alice_ident) = parse_two_bulk(&read_reply(&mut s)).expect("gen enc");

    // Generate & persist Ed25519 signing key under name "signer"
    s.write_all(arr(&["age","signkeygen","signer"]).as_bytes()).unwrap();
    let (_verify, _secret) = parse_two_bulk(&read_reply(&mut s)).expect("gen sign");

    // Encrypt by name
    let msg = "hello from persistent keys";
    s.write_all(arr(&["age","encryptname","alice", msg]).as_bytes()).unwrap();
    let ct_b64 = parse_bulk(&read_reply(&mut s)).expect("ct b64");
    println!("ciphertext b64: {}", ct_b64);

    // Decrypt by name
    s.write_all(arr(&["age","decryptname","alice", &ct_b64]).as_bytes()).unwrap();
    let pt = parse_bulk(&read_reply(&mut s)).expect("pt");
    assert_eq!(pt, msg);
    println!("decrypted ok");

    // Sign by name
    s.write_all(arr(&["age","signname","signer", msg]).as_bytes()).unwrap();
    let sig_b64 = parse_bulk(&read_reply(&mut s)).expect("sig b64");

    // Verify by name
    s.write_all(arr(&["age","verifyname","signer", msg, &sig_b64]).as_bytes()).unwrap();
    let ok = parse_simple(&read_reply(&mut s)).expect("verify");
    assert_eq!(ok, "1");
    println!("signature verified");

    // List names
    s.write_all(arr(&["age","list"]).as_bytes()).unwrap();
    let list = read_reply(&mut s);
    println!("LIST -> {list}");

    println!("✔ persistent AGE workflow complete.");
}
@@ -1,4 +1,3 @@
Perfect — here’s a tiny “factory” you can drop in.

### Cargo.toml
@@ -6,20 +6,19 @@ echo "=========================================="
echo ""
echo "1️⃣ Running Simple Redis Tests (4 tests)..."
echo "----------------------------------------------"
cargo test --test simple_redis_test -- --nocapture
cargo test -p herodb --test simple_redis_test -- --nocapture

echo ""
echo "2️⃣ Running Comprehensive Redis Integration Tests (13 tests)..."
echo "----------------------------------------------------------------"
cargo test --test redis_integration_tests -- --nocapture
cargo test --test redis_basic_client -- --nocapture
cargo test --test debug_hset -- --nocapture
cargo test --test debug_hset_simple -- --nocapture
cargo test -p herodb --test redis_integration_tests -- --nocapture
cargo test -p herodb --test debug_hset -- --nocapture
cargo test -p herodb --test debug_hset_simple -- --nocapture

echo ""
echo "3️⃣ Running All Tests..."
echo "------------------------"
cargo test -- --nocapture
echo "3️⃣ Running All Workspace Tests..."
echo "--------------------------------"
cargo test --workspace -- --nocapture

echo ""
echo "✅ Test execution completed!"
herodb/src/age.rs (new file): 308 lines

@@ -0,0 +1,308 @@
//! age.rs — AGE (rage) helpers + persistent key management for your mini-Redis.
//
// Features:
// - X25519 encryption/decryption (age style)
// - Ed25519 detached signatures + verification
// - Persistent named keys in DB (strings):
//     age:key:{name}      -> X25519 recipient (public encryption key, "age1...")
//     age:privkey:{name}  -> X25519 identity (secret encryption key, "AGE-SECRET-KEY-1...")
//     age:signpub:{name}  -> Ed25519 verify pubkey (public, used to verify signatures)
//     age:signpriv:{name} -> Ed25519 signing secret key (private, used to sign)
// - Base64 wrapping for ciphertext/signature binary blobs.

use std::str::FromStr;

use secrecy::ExposeSecret;
use age::{Decryptor, Encryptor};
use age::x25519;

use ed25519_dalek::{Signature, Signer, Verifier, SigningKey, VerifyingKey};

use base64::{engine::general_purpose::STANDARD as B64, Engine as _};

use crate::protocol::Protocol;
use crate::server::Server;
use crate::error::DBError;

// ---------- Internal helpers ----------

#[derive(Debug)]
pub enum AgeWireError {
    ParseKey,
    Crypto(String),
    Utf8,
    SignatureLen,
    NotFound(&'static str), // which kind of key was missing
    Storage(String),
}

impl AgeWireError {
    fn to_protocol(self) -> Protocol {
        match self {
            AgeWireError::ParseKey => Protocol::err("ERR age: invalid key"),
            AgeWireError::Crypto(e) => Protocol::err(&format!("ERR age: {e}")),
            AgeWireError::Utf8 => Protocol::err("ERR age: invalid UTF-8 plaintext"),
            AgeWireError::SignatureLen => Protocol::err("ERR age: bad signature length"),
            AgeWireError::NotFound(w) => Protocol::err(&format!("ERR age: missing {w}")),
            AgeWireError::Storage(e) => Protocol::err(&format!("ERR storage: {e}")),
        }
    }
}

fn parse_recipient(s: &str) -> Result<x25519::Recipient, AgeWireError> {
    x25519::Recipient::from_str(s).map_err(|_| AgeWireError::ParseKey)
}
fn parse_identity(s: &str) -> Result<x25519::Identity, AgeWireError> {
    x25519::Identity::from_str(s).map_err(|_| AgeWireError::ParseKey)
}
fn parse_ed25519_signing_key(s: &str) -> Result<SigningKey, AgeWireError> {
    // Parse base64-encoded signing key
    let bytes = B64.decode(s).map_err(|_| AgeWireError::ParseKey)?;
    if bytes.len() != 32 {
        return Err(AgeWireError::ParseKey);
    }
    let key_bytes: [u8; 32] = bytes.try_into().map_err(|_| AgeWireError::ParseKey)?;
    Ok(SigningKey::from_bytes(&key_bytes))
}
fn parse_ed25519_verifying_key(s: &str) -> Result<VerifyingKey, AgeWireError> {
    // Parse base64-encoded verifying key
    let bytes = B64.decode(s).map_err(|_| AgeWireError::ParseKey)?;
    if bytes.len() != 32 {
        return Err(AgeWireError::ParseKey);
    }
    let key_bytes: [u8; 32] = bytes.try_into().map_err(|_| AgeWireError::ParseKey)?;
    VerifyingKey::from_bytes(&key_bytes).map_err(|_| AgeWireError::ParseKey)
}

// ---------- Stateless crypto helpers (string in/out) ----------

pub fn gen_enc_keypair() -> (String, String) {
    let id = x25519::Identity::generate();
    let pk = id.to_public();
    (pk.to_string(), id.to_string().expose_secret().to_string()) // (recipient, identity)
}

pub fn gen_sign_keypair() -> (String, String) {
    use rand::RngCore;
    use rand::rngs::OsRng;

    // Generate random 32 bytes for the signing key
    let mut secret_bytes = [0u8; 32];
    OsRng.fill_bytes(&mut secret_bytes);

    let signing_key = SigningKey::from_bytes(&secret_bytes);
    let verifying_key = signing_key.verifying_key();

    // Encode as base64 for storage
    let signing_key_b64 = B64.encode(signing_key.to_bytes());
    let verifying_key_b64 = B64.encode(verifying_key.to_bytes());

    (verifying_key_b64, signing_key_b64) // (verify_pub, signing_secret)
}

/// Encrypt `msg` for `recipient_str` (X25519). Returns base64(ciphertext).
pub fn encrypt_b64(recipient_str: &str, msg: &str) -> Result<String, AgeWireError> {
    let recipient = parse_recipient(recipient_str)?;
    let enc = Encryptor::with_recipients(vec![Box::new(recipient)])
        .expect("failed to create encryptor"); // Handle Option<Encryptor>
    let mut out = Vec::new();
    {
        use std::io::Write;
        let mut w = enc.wrap_output(&mut out).map_err(|e| AgeWireError::Crypto(e.to_string()))?;
        w.write_all(msg.as_bytes()).map_err(|e| AgeWireError::Crypto(e.to_string()))?;
        w.finish().map_err(|e| AgeWireError::Crypto(e.to_string()))?;
    }
    Ok(B64.encode(out))
}

/// Decrypt base64(ciphertext) with `identity_str`. Returns plaintext String.
pub fn decrypt_b64(identity_str: &str, ct_b64: &str) -> Result<String, AgeWireError> {
    let id = parse_identity(identity_str)?;
    let ct = B64.decode(ct_b64.as_bytes()).map_err(|e| AgeWireError::Crypto(e.to_string()))?;
    let dec = Decryptor::new(&ct[..]).map_err(|e| AgeWireError::Crypto(e.to_string()))?;

    // The decrypt method returns a Result<StreamReader, DecryptError>
    let mut r = match dec {
        Decryptor::Recipients(d) => d.decrypt(std::iter::once(&id as &dyn age::Identity))
            .map_err(|e| AgeWireError::Crypto(e.to_string()))?,
        Decryptor::Passphrase(_) => return Err(AgeWireError::Crypto("Expected recipients, got passphrase".to_string())),
    };

    let mut pt = Vec::new();
    use std::io::Read;
    r.read_to_end(&mut pt).map_err(|e| AgeWireError::Crypto(e.to_string()))?;
    String::from_utf8(pt).map_err(|_| AgeWireError::Utf8)
}

/// Sign bytes of `msg` (detached). Returns base64(signature bytes, 64 bytes).
pub fn sign_b64(signing_secret_str: &str, msg: &str) -> Result<String, AgeWireError> {
    let signing_key = parse_ed25519_signing_key(signing_secret_str)?;
    let sig = signing_key.sign(msg.as_bytes());
    Ok(B64.encode(sig.to_bytes()))
}

/// Verify detached signature (base64) for `msg` with pubkey.
pub fn verify_b64(verify_pub_str: &str, msg: &str, sig_b64: &str) -> Result<bool, AgeWireError> {
    let verifying_key = parse_ed25519_verifying_key(verify_pub_str)?;
    let sig_bytes = B64.decode(sig_b64.as_bytes()).map_err(|e| AgeWireError::Crypto(e.to_string()))?;
    if sig_bytes.len() != 64 {
        return Err(AgeWireError::SignatureLen);
    }
    let sig = Signature::from_bytes(sig_bytes[..].try_into().unwrap());
    Ok(verifying_key.verify(msg.as_bytes(), &sig).is_ok())
}

// ---------- Storage helpers ----------

fn sget(server: &Server, key: &str) -> Result<Option<String>, AgeWireError> {
    let st = server.current_storage().map_err(|e| AgeWireError::Storage(e.0))?;
    st.get(key).map_err(|e| AgeWireError::Storage(e.0))
}
fn sset(server: &Server, key: &str, val: &str) -> Result<(), AgeWireError> {
    let st = server.current_storage().map_err(|e| AgeWireError::Storage(e.0))?;
    st.set(key.to_string(), val.to_string()).map_err(|e| AgeWireError::Storage(e.0))
}

fn enc_pub_key_key(name: &str) -> String { format!("age:key:{name}") }
fn enc_priv_key_key(name: &str) -> String { format!("age:privkey:{name}") }
fn sign_pub_key_key(name: &str) -> String { format!("age:signpub:{name}") }
fn sign_priv_key_key(name: &str) -> String { format!("age:signpriv:{name}") }

// ---------- Command handlers (RESP Protocol) ----------
// Basic (stateless) ones kept for completeness

pub async fn cmd_age_genenc() -> Protocol {
    let (recip, ident) = gen_enc_keypair();
    Protocol::Array(vec![Protocol::BulkString(recip), Protocol::BulkString(ident)])
}

pub async fn cmd_age_gensign() -> Protocol {
    let (verify, secret) = gen_sign_keypair();
    Protocol::Array(vec![Protocol::BulkString(verify), Protocol::BulkString(secret)])
}

pub async fn cmd_age_encrypt(recipient: &str, message: &str) -> Protocol {
    match encrypt_b64(recipient, message) {
        Ok(b64) => Protocol::BulkString(b64),
        Err(e) => e.to_protocol(),
    }
}

pub async fn cmd_age_decrypt(identity: &str, ct_b64: &str) -> Protocol {
    match decrypt_b64(identity, ct_b64) {
        Ok(pt) => Protocol::BulkString(pt),
        Err(e) => e.to_protocol(),
    }
}

pub async fn cmd_age_sign(secret: &str, message: &str) -> Protocol {
    match sign_b64(secret, message) {
        Ok(b64sig) => Protocol::BulkString(b64sig),
        Err(e) => e.to_protocol(),
    }
}

pub async fn cmd_age_verify(verify_pub: &str, message: &str, sig_b64: &str) -> Protocol {
    match verify_b64(verify_pub, message, sig_b64) {
        Ok(true) => Protocol::SimpleString("1".to_string()),
        Ok(false) => Protocol::SimpleString("0".to_string()),
        Err(e) => e.to_protocol(),
    }
}

// ---------- NEW: Persistent, named-key commands ----------

pub async fn cmd_age_keygen(server: &Server, name: &str) -> Protocol {
    let (recip, ident) = gen_enc_keypair();
    if let Err(e) = sset(server, &enc_pub_key_key(name), &recip) { return e.to_protocol(); }
    if let Err(e) = sset(server, &enc_priv_key_key(name), &ident) { return e.to_protocol(); }
    Protocol::Array(vec![Protocol::BulkString(recip), Protocol::BulkString(ident)])
}

pub async fn cmd_age_signkeygen(server: &Server, name: &str) -> Protocol {
    let (verify, secret) = gen_sign_keypair();
    if let Err(e) = sset(server, &sign_pub_key_key(name), &verify) { return e.to_protocol(); }
    if let Err(e) = sset(server, &sign_priv_key_key(name), &secret) { return e.to_protocol(); }
    Protocol::Array(vec![Protocol::BulkString(verify), Protocol::BulkString(secret)])
}

pub async fn cmd_age_encrypt_name(server: &Server, name: &str, message: &str) -> Protocol {
    let recip = match sget(server, &enc_pub_key_key(name)) {
        Ok(Some(v)) => v,
        Ok(None) => return AgeWireError::NotFound("recipient (age:key:{name})").to_protocol(),
        Err(e) => return e.to_protocol(),
    };
    match encrypt_b64(&recip, message) {
        Ok(ct) => Protocol::BulkString(ct),
        Err(e) => e.to_protocol(),
    }
}

pub async fn cmd_age_decrypt_name(server: &Server, name: &str, ct_b64: &str) -> Protocol {
    let ident = match sget(server, &enc_priv_key_key(name)) {
        Ok(Some(v)) => v,
        Ok(None) => return AgeWireError::NotFound("identity (age:privkey:{name})").to_protocol(),
        Err(e) => return e.to_protocol(),
    };
    match decrypt_b64(&ident, ct_b64) {
        Ok(pt) => Protocol::BulkString(pt),
        Err(e) => e.to_protocol(),
    }
}

pub async fn cmd_age_sign_name(server: &Server, name: &str, message: &str) -> Protocol {
    let sec = match sget(server, &sign_priv_key_key(name)) {
        Ok(Some(v)) => v,
        Ok(None) => return AgeWireError::NotFound("signing secret (age:signpriv:{name})").to_protocol(),
        Err(e) => return e.to_protocol(),
    };
    match sign_b64(&sec, message) {
        Ok(sig) => Protocol::BulkString(sig),
        Err(e) => e.to_protocol(),
    }
}

pub async fn cmd_age_verify_name(server: &Server, name: &str, message: &str, sig_b64: &str) -> Protocol {
    let pubk = match sget(server, &sign_pub_key_key(name)) {
        Ok(Some(v)) => v,
        Ok(None) => return AgeWireError::NotFound("verify pubkey (age:signpub:{name})").to_protocol(),
        Err(e) => return e.to_protocol(),
    };
    match verify_b64(&pubk, message, sig_b64) {
        Ok(true) => Protocol::SimpleString("1".to_string()),
        Ok(false) => Protocol::SimpleString("0".to_string()),
        Err(e) => e.to_protocol(),
    }
}

pub async fn cmd_age_list(server: &Server) -> Protocol {
    // Returns 4 arrays: ["encpub", <names...>], ["encpriv", ...], ["signpub", ...], ["signpriv", ...]
    let st = match server.current_storage() { Ok(s) => s, Err(e) => return Protocol::err(&e.0) };

    let pull = |pat: &str, prefix: &str| -> Result<Vec<String>, DBError> {
        let keys = st.keys(pat)?;
        let mut names: Vec<String> = keys.into_iter()
            .filter_map(|k| k.strip_prefix(prefix).map(|x| x.to_string()))
            .collect();
        names.sort();
        Ok(names)
    };

    let encpub = match pull("age:key:*", "age:key:") { Ok(v) => v, Err(e)=> return Protocol::err(&e.0) };
    let encpriv = match pull("age:privkey:*", "age:privkey:") { Ok(v) => v, Err(e)=> return Protocol::err(&e.0) };
    let signpub = match pull("age:signpub:*", "age:signpub:") { Ok(v) => v, Err(e)=> return Protocol::err(&e.0) };
    let signpriv= match pull("age:signpriv:*", "age:signpriv:") { Ok(v) => v, Err(e)=> return Protocol::err(&e.0) };

    let to_arr = |label: &str, v: Vec<String>| {
        let mut out = vec![Protocol::BulkString(label.to_string())];
        out.push(Protocol::Array(v.into_iter().map(Protocol::BulkString).collect()));
        Protocol::Array(out)
    };

    Protocol::Array(vec![
        to_arr("encpub", encpub),
        to_arr("encpriv", encpriv),
        to_arr("signpub", signpub),
        to_arr("signpriv", signpriv),
    ])
}
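For orientation, a hypothetical round-trip through the helpers above (this assumes the crate exposes them as `herodb::age`, which is not shown in this diff; inside the server they are reached through the AGE commands wired up below):

```rust
// Hypothetical usage sketch of the public helpers in age.rs.
use herodb::age::{decrypt_b64, encrypt_b64, gen_enc_keypair, gen_sign_keypair, sign_b64, verify_b64};

fn main() {
    // X25519: generate an (recipient, identity) pair, encrypt, decrypt.
    let (recipient, identity) = gen_enc_keypair();
    let ct = encrypt_b64(&recipient, "hello").expect("encrypt");
    assert_eq!(decrypt_b64(&identity, &ct).expect("decrypt"), "hello");

    // Ed25519: generate a (verify_pub, signing_secret) pair, sign, verify.
    let (verify_pub, signing_secret) = gen_sign_keypair();
    let sig = sign_b64(&signing_secret, "hello").expect("sign");
    assert!(verify_b64(&verify_pub, "hello", &sig).expect("verify"));
}
```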
@@ -1,9 +1,11 @@
use crate::{error::DBError, protocol::Protocol, server::Server};
use serde::Serialize;

#[derive(Debug, Clone)]
pub enum Cmd {
    Ping,
    Echo(String),
    Select(u64), // Changed from u16 to u64
    Get(String),
    Set(String, String),
    SetPx(String, String, u128),
@@ -46,7 +48,24 @@ pub enum Cmd {
    LTrim(String, i64, i64),
    LIndex(String, i64),
    LRange(String, i64, i64),
    FlushDb,
    Unknow(String),
    // AGE (rage) commands — stateless
    AgeGenEnc,
    AgeGenSign,
    AgeEncrypt(String, String),        // recipient, message
    AgeDecrypt(String, String),        // identity, ciphertext_b64
    AgeSign(String, String),           // signing_secret, message
    AgeVerify(String, String, String), // verify_pub, message, signature_b64

    // NEW: persistent named-key commands
    AgeKeygen(String),                     // name
    AgeSignKeygen(String),                 // name
    AgeEncryptName(String, String),        // name, message
    AgeDecryptName(String, String),        // name, ciphertext_b64
    AgeSignName(String, String),           // name, message
    AgeVerifyName(String, String, String), // name, message, signature_b64
    AgeList,
}

impl Cmd {
@@ -60,6 +79,13 @@ impl Cmd {
        }
        Ok((
            match cmd[0].to_lowercase().as_str() {
                "select" => {
                    if cmd.len() != 2 {
                        return Err(DBError("wrong number of arguments for SELECT".to_string()));
                    }
                    let idx = cmd[1].parse::<u64>().map_err(|_| DBError("ERR DB index is not an integer".to_string()))?;
                    Cmd::Select(idx)
                }
                "echo" => Cmd::Echo(cmd[1].clone()),
                "ping" => Cmd::Ping,
                "get" => Cmd::Get(cmd[1].clone()),
@@ -386,6 +412,49 @@ impl Cmd {
                    let stop = cmd[3].parse::<i64>().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?;
                    Cmd::LRange(cmd[1].clone(), start, stop)
                }
                "flushdb" => {
                    if cmd.len() != 1 {
                        return Err(DBError("wrong number of arguments for FLUSHDB command".to_string()));
                    }
                    Cmd::FlushDb
                }
                "age" => {
                    if cmd.len() < 2 {
                        return Err(DBError("wrong number of arguments for AGE".to_string()));
                    }
                    match cmd[1].to_lowercase().as_str() {
                        // stateless
                        "genenc" => { if cmd.len() != 2 { return Err(DBError("AGE GENENC takes no args".to_string())); }
                            Cmd::AgeGenEnc }
                        "gensign" => { if cmd.len() != 2 { return Err(DBError("AGE GENSIGN takes no args".to_string())); }
                            Cmd::AgeGenSign }
                        "encrypt" => { if cmd.len() != 4 { return Err(DBError("AGE ENCRYPT <recipient> <message>".to_string())); }
                            Cmd::AgeEncrypt(cmd[2].clone(), cmd[3].clone()) }
                        "decrypt" => { if cmd.len() != 4 { return Err(DBError("AGE DECRYPT <identity> <ciphertext_b64>".to_string())); }
                            Cmd::AgeDecrypt(cmd[2].clone(), cmd[3].clone()) }
                        "sign" => { if cmd.len() != 4 { return Err(DBError("AGE SIGN <signing_secret> <message>".to_string())); }
                            Cmd::AgeSign(cmd[2].clone(), cmd[3].clone()) }
                        "verify" => { if cmd.len() != 5 { return Err(DBError("AGE VERIFY <verify_pub> <message> <signature_b64>".to_string())); }
                            Cmd::AgeVerify(cmd[2].clone(), cmd[3].clone(), cmd[4].clone()) }

                        // persistent names
                        "keygen" => { if cmd.len() != 3 { return Err(DBError("AGE KEYGEN <name>".to_string())); }
                            Cmd::AgeKeygen(cmd[2].clone()) }
                        "signkeygen" => { if cmd.len() != 3 { return Err(DBError("AGE SIGNKEYGEN <name>".to_string())); }
                            Cmd::AgeSignKeygen(cmd[2].clone()) }
                        "encryptname" => { if cmd.len() != 4 { return Err(DBError("AGE ENCRYPTNAME <name> <message>".to_string())); }
                            Cmd::AgeEncryptName(cmd[2].clone(), cmd[3].clone()) }
                        "decryptname" => { if cmd.len() != 4 { return Err(DBError("AGE DECRYPTNAME <name> <ciphertext_b64>".to_string())); }
                            Cmd::AgeDecryptName(cmd[2].clone(), cmd[3].clone()) }
                        "signname" => { if cmd.len() != 4 { return Err(DBError("AGE SIGNNAME <name> <message>".to_string())); }
                            Cmd::AgeSignName(cmd[2].clone(), cmd[3].clone()) }
                        "verifyname" => { if cmd.len() != 5 { return Err(DBError("AGE VERIFYNAME <name> <message> <signature_b64>".to_string())); }
                            Cmd::AgeVerifyName(cmd[2].clone(), cmd[3].clone(), cmd[4].clone()) }
                        "list" => { if cmd.len() != 2 { return Err(DBError("AGE LIST".to_string())); }
                            Cmd::AgeList }
                        _ => return Err(DBError(format!("unsupported AGE subcommand {:?}", cmd))),
                    }
                }
                _ => Cmd::Unknow(cmd[0].clone()),
            },
            protocol,
@@ -399,90 +468,125 @@ impl Cmd {
        }
    }

    pub async fn run(
        &self,
        server: &mut Server,
        protocol: Protocol,
        queued_cmd: &mut Option<Vec<(Cmd, Protocol)>>,
    ) -> Result<Protocol, DBError> {
    pub async fn run(self, server: &mut Server) -> Result<Protocol, DBError> {
        // Handle queued commands for transactions
        if queued_cmd.is_some()
        if server.queued_cmd.is_some()
            && !matches!(self, Cmd::Exec)
            && !matches!(self, Cmd::Multi)
            && !matches!(self, Cmd::Discard)
        {
            queued_cmd
                .as_mut()
                .unwrap()
                .push((self.clone(), protocol.clone()));
            let protocol = self.clone().to_protocol();
            server.queued_cmd.as_mut().unwrap().push((self, protocol));
            return Ok(Protocol::SimpleString("QUEUED".to_string()));
        }

        match self {
            Cmd::Select(db) => select_cmd(server, db).await,
            Cmd::Ping => Ok(Protocol::SimpleString("PONG".to_string())),
            Cmd::Echo(s) => Ok(Protocol::SimpleString(s.clone())),
            Cmd::Get(k) => get_cmd(server, k).await,
            Cmd::Set(k, v) => set_cmd(server, k, v).await,
            Cmd::SetPx(k, v, x) => set_px_cmd(server, k, v, x).await,
            Cmd::SetEx(k, v, x) => set_ex_cmd(server, k, v, x).await,
            Cmd::Del(k) => del_cmd(server, k).await,
            Cmd::ConfigGet(name) => config_get_cmd(name, server),
            Cmd::Echo(s) => Ok(Protocol::BulkString(s)),
            Cmd::Get(k) => get_cmd(server, &k).await,
            Cmd::Set(k, v) => set_cmd(server, &k, &v).await,
            Cmd::SetPx(k, v, x) => set_px_cmd(server, &k, &v, &x).await,
            Cmd::SetEx(k, v, x) => set_ex_cmd(server, &k, &v, &x).await,
            Cmd::Del(k) => del_cmd(server, &k).await,
            Cmd::ConfigGet(name) => config_get_cmd(&name, server),
            Cmd::Keys => keys_cmd(server).await,
            Cmd::Info(section) => info_cmd(section),
            Cmd::Type(k) => type_cmd(server, k).await,
            Cmd::Incr(key) => incr_cmd(server, key).await,
            Cmd::Info(section) => info_cmd(server, &section).await,
            Cmd::Type(k) => type_cmd(server, &k).await,
            Cmd::Incr(key) => incr_cmd(server, &key).await,
            Cmd::Multi => {
                *queued_cmd = Some(Vec::<(Cmd, Protocol)>::new());
                server.queued_cmd = Some(Vec::<(Cmd, Protocol)>::new());
                Ok(Protocol::SimpleString("OK".to_string()))
            }
            Cmd::Exec => exec_cmd(queued_cmd, server).await,
            Cmd::Exec => exec_cmd(server).await,
            Cmd::Discard => {
                if queued_cmd.is_some() {
                    *queued_cmd = None;
                if server.queued_cmd.is_some() {
                    server.queued_cmd = None;
                    Ok(Protocol::SimpleString("OK".to_string()))
                } else {
                    Ok(Protocol::err("ERR DISCARD without MULTI"))
                }
            }
            // Hash commands
            Cmd::HSet(key, pairs) => hset_cmd(server, key, pairs).await,
            Cmd::HGet(key, field) => hget_cmd(server, key, field).await,
            Cmd::HGetAll(key) => hgetall_cmd(server, key).await,
            Cmd::HDel(key, fields) => hdel_cmd(server, key, fields).await,
            Cmd::HExists(key, field) => hexists_cmd(server, key, field).await,
            Cmd::HKeys(key) => hkeys_cmd(server, key).await,
            Cmd::HVals(key) => hvals_cmd(server, key).await,
            Cmd::HLen(key) => hlen_cmd(server, key).await,
            Cmd::HMGet(key, fields) => hmget_cmd(server, key, fields).await,
            Cmd::HSetNx(key, field, value) => hsetnx_cmd(server, key, field, value).await,
            Cmd::HScan(key, cursor, pattern, count) => hscan_cmd(server, key, cursor, pattern.as_deref(), count).await,
            Cmd::Scan(cursor, pattern, count) => scan_cmd(server, cursor, pattern.as_deref(), count).await,
            Cmd::Ttl(key) => ttl_cmd(server, key).await,
            Cmd::Exists(key) => exists_cmd(server, key).await,
            Cmd::HSet(key, pairs) => hset_cmd(server, &key, &pairs).await,
            Cmd::HGet(key, field) => hget_cmd(server, &key, &field).await,
            Cmd::HGetAll(key) => hgetall_cmd(server, &key).await,
            Cmd::HDel(key, fields) => hdel_cmd(server, &key, &fields).await,
            Cmd::HExists(key, field) => hexists_cmd(server, &key, &field).await,
            Cmd::HKeys(key) => hkeys_cmd(server, &key).await,
            Cmd::HVals(key) => hvals_cmd(server, &key).await,
            Cmd::HLen(key) => hlen_cmd(server, &key).await,
            Cmd::HMGet(key, fields) => hmget_cmd(server, &key, &fields).await,
            Cmd::HSetNx(key, field, value) => hsetnx_cmd(server, &key, &field, &value).await,
            Cmd::HScan(key, cursor, pattern, count) => hscan_cmd(server, &key, &cursor, pattern.as_deref(), &count).await,
            Cmd::Scan(cursor, pattern, count) => scan_cmd(server, &cursor, pattern.as_deref(), &count).await,
            Cmd::Ttl(key) => ttl_cmd(server, &key).await,
            Cmd::Exists(key) => exists_cmd(server, &key).await,
            Cmd::Quit => Ok(Protocol::SimpleString("OK".to_string())),
            Cmd::Client(_) => Ok(Protocol::SimpleString("OK".to_string())),
            Cmd::ClientSetName(name) => client_setname_cmd(server, name).await,
            Cmd::ClientSetName(name) => client_setname_cmd(server, &name).await,
            Cmd::ClientGetName => client_getname_cmd(server).await,
            // List commands
            Cmd::LPush(key, elements) => lpush_cmd(server, key, elements).await,
            Cmd::RPush(key, elements) => rpush_cmd(server, key, elements).await,
            Cmd::LPop(key, count) => lpop_cmd(server, key, count).await,
            Cmd::RPop(key, count) => rpop_cmd(server, key, count).await,
            Cmd::LLen(key) => llen_cmd(server, key).await,
            Cmd::LRem(key, count, element) => lrem_cmd(server, key, *count, element).await,
            Cmd::LTrim(key, start, stop) => ltrim_cmd(server, key, *start, *stop).await,
            Cmd::LIndex(key, index) => lindex_cmd(server, key, *index).await,
            Cmd::LRange(key, start, stop) => lrange_cmd(server, key, *start, *stop).await,
            Cmd::Unknow(s) => {
                println!("\x1b[31;1munknown command: {}\x1b[0m", s);
                Ok(Protocol::err(&format!("ERR unknown command '{}'", s)))
            Cmd::LPush(key, elements) => lpush_cmd(server, &key, &elements).await,
            Cmd::RPush(key, elements) => rpush_cmd(server, &key, &elements).await,
            Cmd::LPop(key, count) => lpop_cmd(server, &key, &count).await,
            Cmd::RPop(key, count) => rpop_cmd(server, &key, &count).await,
            Cmd::LLen(key) => llen_cmd(server, &key).await,
            Cmd::LRem(key, count, element) => lrem_cmd(server, &key, count, &element).await,
            Cmd::LTrim(key, start, stop) => ltrim_cmd(server, &key, start, stop).await,
            Cmd::LIndex(key, index) => lindex_cmd(server, &key, index).await,
            Cmd::LRange(key, start, stop) => lrange_cmd(server, &key, start, stop).await,
            Cmd::FlushDb => flushdb_cmd(server).await,
            // AGE (rage): stateless
            Cmd::AgeGenEnc => Ok(crate::age::cmd_age_genenc().await),
            Cmd::AgeGenSign => Ok(crate::age::cmd_age_gensign().await),
            Cmd::AgeEncrypt(recipient, message) => Ok(crate::age::cmd_age_encrypt(&recipient, &message).await),
            Cmd::AgeDecrypt(identity, ct_b64) => Ok(crate::age::cmd_age_decrypt(&identity, &ct_b64).await),
            Cmd::AgeSign(secret, message) => Ok(crate::age::cmd_age_sign(&secret, &message).await),
            Cmd::AgeVerify(vpub, msg, sig_b64) => Ok(crate::age::cmd_age_verify(&vpub, &msg, &sig_b64).await),

            // AGE (rage): persistent named keys
            Cmd::AgeKeygen(name) => Ok(crate::age::cmd_age_keygen(server, &name).await),
            Cmd::AgeSignKeygen(name) => Ok(crate::age::cmd_age_signkeygen(server, &name).await),
            Cmd::AgeEncryptName(name, message) => Ok(crate::age::cmd_age_encrypt_name(server, &name, &message).await),
            Cmd::AgeDecryptName(name, ct_b64) => Ok(crate::age::cmd_age_decrypt_name(server, &name, &ct_b64).await),
            Cmd::AgeSignName(name, message) => Ok(crate::age::cmd_age_sign_name(server, &name, &message).await),
            Cmd::AgeVerifyName(name, message, sig_b64) => Ok(crate::age::cmd_age_verify_name(server, &name, &message, &sig_b64).await),
            Cmd::AgeList => Ok(crate::age::cmd_age_list(server).await),
            Cmd::Unknow(s) => Ok(Protocol::err(&format!("ERR unknown command `{}`", s))),
        }
    }

    pub fn to_protocol(self) -> Protocol {
        match self {
            Cmd::Select(db) => Protocol::Array(vec![Protocol::BulkString("select".to_string()), Protocol::BulkString(db.to_string())]),
            Cmd::Ping => Protocol::Array(vec![Protocol::BulkString("ping".to_string())]),
            Cmd::Echo(s) => Protocol::Array(vec![Protocol::BulkString("echo".to_string()), Protocol::BulkString(s)]),
            Cmd::Get(k) => Protocol::Array(vec![Protocol::BulkString("get".to_string()), Protocol::BulkString(k)]),
            Cmd::Set(k, v) => Protocol::Array(vec![Protocol::BulkString("set".to_string()), Protocol::BulkString(k), Protocol::BulkString(v)]),
            _ => Protocol::SimpleString("...".to_string())
        }
    }
}

async fn flushdb_cmd(server: &mut Server) -> Result<Protocol, DBError> {
    match server.current_storage()?.flushdb() {
        Ok(_) => Ok(Protocol::SimpleString("OK".to_string())),
        Err(e) => Ok(Protocol::err(&e.0)),
    }
}

async fn select_cmd(server: &mut Server, db: u64) -> Result<Protocol, DBError> {
    // Test if we can access the database (this will create it if needed)
    server.selected_db = db;
    match server.current_storage() {
        Ok(_) => Ok(Protocol::SimpleString("OK".to_string())),
        Err(e) => Ok(Protocol::err(&e.0)),
    }
}

async fn lindex_cmd(server: &Server, key: &str, index: i64) -> Result<Protocol, DBError> {
    match server.storage.lindex(key, index) {
    match server.current_storage()?.lindex(key, index) {
        Ok(Some(element)) => Ok(Protocol::BulkString(element)),
        Ok(None) => Ok(Protocol::Null),
        Err(e) => Ok(Protocol::err(&e.0)),
@@ -490,106 +594,107 @@ async fn lindex_cmd(server: &Server, key: &str, index: i64) -> Result<Protocol,
|
||||
}
|
||||
|
||||
async fn lrange_cmd(server: &Server, key: &str, start: i64, stop: i64) -> Result<Protocol, DBError> {
|
||||
match server.storage.lrange(key, start, stop) {
|
||||
match server.current_storage()?.lrange(key, start, stop) {
|
||||
Ok(elements) => Ok(Protocol::Array(elements.into_iter().map(Protocol::BulkString).collect())),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn ltrim_cmd(server: &Server, key: &str, start: i64, stop: i64) -> Result<Protocol, DBError> {
|
||||
match server.storage.ltrim(key, start, stop) {
|
||||
match server.current_storage()?.ltrim(key, start, stop) {
|
||||
Ok(_) => Ok(Protocol::SimpleString("OK".to_string())),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn lrem_cmd(server: &Server, key: &str, count: i64, element: &str) -> Result<Protocol, DBError> {
|
||||
match server.storage.lrem(key, count, element) {
|
||||
match server.current_storage()?.lrem(key, count, element) {
|
||||
Ok(removed_count) => Ok(Protocol::SimpleString(removed_count.to_string())),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn llen_cmd(server: &Server, key: &str) -> Result<Protocol, DBError> {
|
||||
match server.storage.llen(key) {
|
||||
match server.current_storage()?.llen(key) {
|
||||
Ok(len) => Ok(Protocol::SimpleString(len.to_string())),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn lpop_cmd(server: &Server, key: &str, count: &Option<u64>) -> Result<Protocol, DBError> {
|
||||
match server.storage.lpop(key, *count) {
|
||||
Ok(Some(elements)) => {
|
||||
if count.is_some() {
|
||||
Ok(Protocol::Array(elements.into_iter().map(Protocol::BulkString).collect()))
|
||||
} else {
|
||||
Ok(Protocol::BulkString(elements[0].clone()))
|
||||
}
|
||||
},
|
||||
Ok(None) => {
|
||||
let count_val = count.unwrap_or(1);
|
||||
match server.current_storage()?.lpop(key, count_val) {
|
||||
Ok(elements) => {
|
||||
if elements.is_empty() {
|
||||
if count.is_some() {
|
||||
Ok(Protocol::Array(vec![]))
|
||||
} else {
|
||||
Ok(Protocol::Null)
|
||||
}
|
||||
} else if count.is_some() {
|
||||
Ok(Protocol::Array(elements.into_iter().map(Protocol::BulkString).collect()))
|
||||
} else {
|
||||
Ok(Protocol::BulkString(elements[0].clone()))
|
||||
}
|
||||
},
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn rpop_cmd(server: &Server, key: &str, count: &Option<u64>) -> Result<Protocol, DBError> {
|
||||
match server.storage.rpop(key, *count) {
|
||||
Ok(Some(elements)) => {
|
||||
if count.is_some() {
|
||||
Ok(Protocol::Array(elements.into_iter().map(Protocol::BulkString).collect()))
|
||||
} else {
|
||||
Ok(Protocol::BulkString(elements[0].clone()))
|
||||
}
|
||||
},
|
||||
Ok(None) => {
|
||||
let count_val = count.unwrap_or(1);
|
||||
match server.current_storage()?.rpop(key, count_val) {
|
||||
Ok(elements) => {
|
||||
if elements.is_empty() {
|
||||
if count.is_some() {
|
||||
Ok(Protocol::Array(vec![]))
|
||||
} else {
|
||||
Ok(Protocol::Null)
|
||||
}
|
||||
} else if count.is_some() {
|
||||
Ok(Protocol::Array(elements.into_iter().map(Protocol::BulkString).collect()))
|
||||
} else {
|
||||
Ok(Protocol::BulkString(elements[0].clone()))
|
||||
}
|
||||
},
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn lpush_cmd(server: &Server, key: &str, elements: &[String]) -> Result<Protocol, DBError> {
|
||||
match server.storage.lpush(key, elements.to_vec()) {
|
||||
match server.current_storage()?.lpush(key, elements.to_vec()) {
|
||||
Ok(len) => Ok(Protocol::SimpleString(len.to_string())),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn rpush_cmd(server: &Server, key: &str, elements: &[String]) -> Result<Protocol, DBError> {
|
||||
match server.storage.rpush(key, elements.to_vec()) {
|
||||
match server.current_storage()?.rpush(key, elements.to_vec()) {
|
||||
Ok(len) => Ok(Protocol::SimpleString(len.to_string())),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn exec_cmd(
|
||||
queued_cmd: &mut Option<Vec<(Cmd, Protocol)>>,
|
||||
server: &mut Server,
|
||||
) -> Result<Protocol, DBError> {
|
||||
if queued_cmd.is_some() {
|
||||
let mut vec = Vec::new();
|
||||
for (cmd, protocol) in queued_cmd.as_ref().unwrap() {
|
||||
let res = Box::pin(cmd.run(server, protocol.clone(), &mut None)).await?;
|
||||
vec.push(res);
|
||||
}
|
||||
*queued_cmd = None;
|
||||
Ok(Protocol::Array(vec))
|
||||
async fn exec_cmd(server: &mut Server) -> Result<Protocol, DBError> {
|
||||
// Move the queued commands out of `server` so we drop the borrow immediately.
|
||||
let cmds = if let Some(cmds) = server.queued_cmd.take() {
|
||||
cmds
|
||||
} else {
|
||||
Ok(Protocol::err("ERR EXEC without MULTI"))
|
||||
return Ok(Protocol::err("ERR EXEC without MULTI"));
|
||||
};
|
||||
|
||||
let mut out = Vec::new();
|
||||
for (cmd, _) in cmds {
|
||||
// Use Box::pin to handle recursion in async function
|
||||
let res = Box::pin(cmd.run(server)).await?;
|
||||
out.push(res);
|
||||
}
|
||||
Ok(Protocol::Array(out))
|
||||
}
|
||||
|
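The rewritten `exec_cmd` relies on `Option::take` to move the queued commands out of the server before the loop re-borrows it mutably. A minimal standalone sketch of that pattern (the `State` type below is hypothetical, not the crate's API):

```rust
// Moving the queued commands out of the struct ends the borrow on `state`,
// so the loop can borrow it mutably again for each command.
struct State {
    queued: Option<Vec<String>>,
    log: Vec<String>,
}

fn exec(state: &mut State) -> Vec<String> {
    let cmds = match state.queued.take() {
        Some(cmds) => cmds,
        None => return vec!["ERR EXEC without MULTI".to_string()],
    };
    let mut out = Vec::new();
    for cmd in cmds {
        state.log.push(cmd.clone()); // mutable access is fine here
        out.push(format!("ran {}", cmd));
    }
    out
}

fn main() {
    let mut s = State { queued: Some(vec!["SET".to_string(), "GET".to_string()]), log: vec![] };
    assert_eq!(exec(&mut s), vec!["ran SET", "ran GET"]);
    // A second EXEC with nothing queued reports the usual error.
    assert_eq!(exec(&mut s), vec!["ERR EXEC without MULTI"]);
}
```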
||||
async fn incr_cmd(server: &Server, key: &String) -> Result<Protocol, DBError> {
|
||||
let current_value = server.storage.get(key)?;
|
||||
let storage = server.current_storage()?;
|
||||
let current_value = storage.get(key)?;
|
||||
|
||||
let new_value = match current_value {
|
||||
Some(v) => {
|
||||
@@ -601,36 +706,58 @@ async fn incr_cmd(server: &Server, key: &String) -> Result<Protocol, DBError> {
|
||||
None => 1,
|
||||
};
|
||||
|
||||
server.storage.set(key.clone(), new_value.to_string())?;
|
||||
storage.set(key.clone(), new_value.to_string())?;
|
||||
Ok(Protocol::SimpleString(new_value.to_string()))
|
||||
}
|
||||
|
||||
fn config_get_cmd(name: &String, server: &Server) -> Result<Protocol, DBError> {
|
||||
match name.as_str() {
|
||||
"dir" => Ok(Protocol::Array(vec![
|
||||
let value = match name.as_str() {
|
||||
"dir" => Some(server.option.dir.clone()),
|
||||
"dbfilename" => Some(format!("{}.db", server.selected_db)),
|
||||
"databases" => Some("16".to_string()), // Hardcoded as per original logic
|
||||
_ => None,
|
||||
};
|
||||
|
||||
if let Some(val) = value {
|
||||
Ok(Protocol::Array(vec![
|
||||
Protocol::BulkString(name.clone()),
|
||||
Protocol::BulkString(server.option.dir.clone()),
|
||||
])),
|
||||
"dbfilename" => Ok(Protocol::Array(vec![
|
||||
Protocol::BulkString(name.clone()),
|
||||
Protocol::BulkString("herodb.redb".to_string()),
|
||||
])),
|
||||
"databases" => Ok(Protocol::Array(vec![
|
||||
Protocol::BulkString(name.clone()),
|
||||
Protocol::BulkString("16".to_string()),
|
||||
])),
|
||||
_ => Ok(Protocol::Array(vec![])), // Return empty array for unknown configs instead of error
|
||||
Protocol::BulkString(val),
|
||||
]))
|
||||
} else {
|
||||
// Return an empty array for unknown config options, which is standard Redis behavior
|
||||
Ok(Protocol::Array(vec![]))
|
||||
}
|
||||
}
|
||||
|
||||
async fn keys_cmd(server: &Server) -> Result<Protocol, DBError> {
|
||||
let keys = server.storage.keys("*")?;
|
||||
let keys = server.current_storage()?.keys("*")?;
|
||||
Ok(Protocol::Array(
|
||||
keys.into_iter().map(Protocol::BulkString).collect(),
|
||||
))
|
||||
}
|
||||
|
||||
fn info_cmd(section: &Option<String>) -> Result<Protocol, DBError> {
|
||||
#[derive(Serialize)]
|
||||
struct ServerInfo {
|
||||
redis_version: String,
|
||||
encrypted: bool,
|
||||
selected_db: u64,
|
||||
}
|
||||
|
||||
async fn info_cmd(server: &Server, section: &Option<String>) -> Result<Protocol, DBError> {
|
||||
let info = ServerInfo {
|
||||
redis_version: "7.0.0".to_string(),
|
||||
encrypted: server.current_storage()?.is_encrypted(),
|
||||
selected_db: server.selected_db,
|
||||
};
|
||||
|
||||
let mut info_string = String::new();
|
||||
info_string.push_str(&format!("# Server\n"));
|
||||
info_string.push_str(&format!("redis_version:{}\n", info.redis_version));
|
||||
info_string.push_str(&format!("encrypted:{}\n", if info.encrypted { 1 } else { 0 }));
|
||||
info_string.push_str(&format!("# Keyspace\n"));
|
||||
info_string.push_str(&format!("db{}:keys=0,expires=0,avg_ttl=0\n", info.selected_db));
|
||||
|
||||
|
||||
match section {
|
||||
Some(s) => match s.as_str() {
|
||||
"replication" => Ok(Protocol::BulkString(
|
||||
@@ -638,19 +765,21 @@ fn info_cmd(section: &Option<String>) -> Result<Protocol, DBError> {
|
||||
)),
|
||||
_ => Err(DBError(format!("unsupported section {:?}", s))),
|
||||
},
|
||||
None => Ok(Protocol::BulkString("# Server\nredis_version:7.0.0\n".to_string())),
|
||||
None => {
|
||||
Ok(Protocol::BulkString(info_string))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn type_cmd(server: &Server, k: &String) -> Result<Protocol, DBError> {
|
||||
match server.storage.get_key_type(k)? {
|
||||
match server.current_storage()?.get_key_type(k)? {
|
||||
Some(type_str) => Ok(Protocol::SimpleString(type_str)),
|
||||
None => Ok(Protocol::SimpleString("none".to_string())),
|
||||
}
|
||||
}
|
||||
|
||||
async fn del_cmd(server: &Server, k: &str) -> Result<Protocol, DBError> {
|
||||
server.storage.del(k.to_string())?;
|
||||
server.current_storage()?.del(k.to_string())?;
|
||||
Ok(Protocol::SimpleString("1".to_string()))
|
||||
}
|
||||
|
||||
@@ -660,7 +789,7 @@ async fn set_ex_cmd(
|
||||
v: &str,
|
||||
x: &u128,
|
||||
) -> Result<Protocol, DBError> {
|
||||
server.storage.setx(k.to_string(), v.to_string(), *x * 1000)?;
|
||||
server.current_storage()?.setx(k.to_string(), v.to_string(), *x * 1000)?;
|
||||
Ok(Protocol::SimpleString("OK".to_string()))
|
||||
}
|
||||
|
||||
@@ -670,28 +799,28 @@ async fn set_px_cmd(
|
||||
v: &str,
|
||||
x: &u128,
|
||||
) -> Result<Protocol, DBError> {
|
||||
server.storage.setx(k.to_string(), v.to_string(), *x)?;
|
||||
server.current_storage()?.setx(k.to_string(), v.to_string(), *x)?;
|
||||
Ok(Protocol::SimpleString("OK".to_string()))
|
||||
}
|
||||
|
||||
async fn set_cmd(server: &Server, k: &str, v: &str) -> Result<Protocol, DBError> {
|
||||
server.storage.set(k.to_string(), v.to_string())?;
|
||||
server.current_storage()?.set(k.to_string(), v.to_string())?;
|
||||
Ok(Protocol::SimpleString("OK".to_string()))
|
||||
}
|
||||
|
||||
async fn get_cmd(server: &Server, k: &str) -> Result<Protocol, DBError> {
|
||||
let v = server.storage.get(k)?;
|
||||
let v = server.current_storage()?.get(k)?;
|
||||
Ok(v.map_or(Protocol::Null, Protocol::BulkString))
|
||||
}
|
||||
|
||||
// Hash command implementations
|
||||
async fn hset_cmd(server: &Server, key: &str, pairs: &[(String, String)]) -> Result<Protocol, DBError> {
|
||||
let new_fields = server.storage.hset(key, pairs)?;
|
||||
let new_fields = server.current_storage()?.hset(key, pairs.to_vec())?;
|
||||
Ok(Protocol::SimpleString(new_fields.to_string()))
|
||||
}
|
||||
|
||||
async fn hget_cmd(server: &Server, key: &str, field: &str) -> Result<Protocol, DBError> {
|
||||
match server.storage.hget(key, field) {
|
||||
match server.current_storage()?.hget(key, field) {
|
||||
Ok(Some(value)) => Ok(Protocol::BulkString(value)),
|
||||
Ok(None) => Ok(Protocol::Null),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
@@ -699,7 +828,7 @@ async fn hget_cmd(server: &Server, key: &str, field: &str) -> Result<Protocol, D
|
||||
}
|
||||
|
||||
async fn hgetall_cmd(server: &Server, key: &str) -> Result<Protocol, DBError> {
|
||||
match server.storage.hgetall(key) {
|
||||
match server.current_storage()?.hgetall(key) {
|
||||
Ok(pairs) => {
|
||||
let mut result = Vec::new();
|
||||
for (field, value) in pairs {
|
||||
@@ -713,21 +842,21 @@ async fn hgetall_cmd(server: &Server, key: &str) -> Result<Protocol, DBError> {
|
||||
}
|
||||
|
||||
async fn hdel_cmd(server: &Server, key: &str, fields: &[String]) -> Result<Protocol, DBError> {
|
||||
match server.storage.hdel(key, fields) {
|
||||
match server.current_storage()?.hdel(key, fields.to_vec()) {
|
||||
Ok(deleted) => Ok(Protocol::SimpleString(deleted.to_string())),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn hexists_cmd(server: &Server, key: &str, field: &str) -> Result<Protocol, DBError> {
|
||||
match server.storage.hexists(key, field) {
|
||||
match server.current_storage()?.hexists(key, field) {
|
||||
Ok(exists) => Ok(Protocol::SimpleString(if exists { "1" } else { "0" }.to_string())),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn hkeys_cmd(server: &Server, key: &str) -> Result<Protocol, DBError> {
|
||||
match server.storage.hkeys(key) {
|
||||
match server.current_storage()?.hkeys(key) {
|
||||
Ok(keys) => Ok(Protocol::Array(
|
||||
keys.into_iter().map(Protocol::BulkString).collect(),
|
||||
)),
|
||||
@@ -736,7 +865,7 @@ async fn hkeys_cmd(server: &Server, key: &str) -> Result<Protocol, DBError> {
|
||||
}
|
||||
|
||||
async fn hvals_cmd(server: &Server, key: &str) -> Result<Protocol, DBError> {
|
||||
match server.storage.hvals(key) {
|
||||
match server.current_storage()?.hvals(key) {
|
||||
Ok(values) => Ok(Protocol::Array(
|
||||
values.into_iter().map(Protocol::BulkString).collect(),
|
||||
)),
|
||||
@@ -745,14 +874,14 @@ async fn hvals_cmd(server: &Server, key: &str) -> Result<Protocol, DBError> {
|
||||
}
|
||||
|
||||
async fn hlen_cmd(server: &Server, key: &str) -> Result<Protocol, DBError> {
|
||||
match server.storage.hlen(key) {
|
||||
match server.current_storage()?.hlen(key) {
|
||||
Ok(len) => Ok(Protocol::SimpleString(len.to_string())),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn hmget_cmd(server: &Server, key: &str, fields: &[String]) -> Result<Protocol, DBError> {
|
||||
match server.storage.hmget(key, fields) {
|
||||
match server.current_storage()?.hmget(key, fields.to_vec()) {
|
||||
Ok(values) => {
|
||||
let result: Vec<Protocol> = values
|
||||
.into_iter()
|
||||
@@ -765,49 +894,64 @@ async fn hmget_cmd(server: &Server, key: &str, fields: &[String]) -> Result<Prot
|
||||
}
|
||||
|
||||
async fn hsetnx_cmd(server: &Server, key: &str, field: &str, value: &str) -> Result<Protocol, DBError> {
|
||||
match server.storage.hsetnx(key, field, value) {
|
||||
match server.current_storage()?.hsetnx(key, field, value) {
|
||||
Ok(was_set) => Ok(Protocol::SimpleString(if was_set { "1" } else { "0" }.to_string())),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn scan_cmd(server: &Server, cursor: &u64, pattern: Option<&str>, count: &Option<u64>) -> Result<Protocol, DBError> {
|
||||
match server.storage.scan(*cursor, pattern, *count) {
|
||||
Ok((next_cursor, keys)) => {
|
||||
async fn scan_cmd(
|
||||
server: &Server,
|
||||
cursor: &u64,
|
||||
pattern: Option<&str>,
|
||||
count: &Option<u64>
|
||||
) -> Result<Protocol, DBError> {
|
||||
match server.current_storage()?.scan(*cursor, pattern, *count) {
|
||||
Ok((next_cursor, key_value_pairs)) => {
|
||||
let mut result = Vec::new();
|
||||
result.push(Protocol::BulkString(next_cursor.to_string()));
|
||||
result.push(Protocol::Array(
|
||||
keys.into_iter().map(Protocol::BulkString).collect(),
|
||||
));
|
||||
// For SCAN, we only return the keys, not the values
|
||||
let keys: Vec<Protocol> = key_value_pairs.into_iter().map(|(key, _)| Protocol::BulkString(key)).collect();
|
||||
result.push(Protocol::Array(keys));
|
||||
Ok(Protocol::Array(result))
|
||||
}
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
Err(e) => Ok(Protocol::err(&format!("ERR {}", e.0))),
|
||||
}
|
||||
}
|
||||
|
||||
async fn hscan_cmd(server: &Server, key: &str, cursor: &u64, pattern: Option<&str>, count: &Option<u64>) -> Result<Protocol, DBError> {
|
||||
match server.storage.hscan(key, *cursor, pattern, *count) {
|
||||
Ok((next_cursor, fields)) => {
|
||||
async fn hscan_cmd(
|
||||
server: &Server,
|
||||
key: &str,
|
||||
cursor: &u64,
|
||||
pattern: Option<&str>,
|
||||
count: &Option<u64>
|
||||
) -> Result<Protocol, DBError> {
|
||||
match server.current_storage()?.hscan(key, *cursor, pattern, *count) {
|
||||
Ok((next_cursor, field_value_pairs)) => {
|
||||
let mut result = Vec::new();
|
||||
result.push(Protocol::BulkString(next_cursor.to_string()));
|
||||
result.push(Protocol::Array(
|
||||
fields.into_iter().map(Protocol::BulkString).collect(),
|
||||
));
|
||||
// For HSCAN, we return field-value pairs flattened
|
||||
let mut fields_and_values = Vec::new();
|
||||
for (field, value) in field_value_pairs {
|
||||
fields_and_values.push(Protocol::BulkString(field));
|
||||
fields_and_values.push(Protocol::BulkString(value));
|
||||
}
|
||||
result.push(Protocol::Array(fields_and_values));
|
||||
Ok(Protocol::Array(result))
|
||||
}
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
Err(e) => Ok(Protocol::err(&format!("ERR {}", e.0))),
|
||||
}
|
||||
}
|
||||
|
||||
async fn ttl_cmd(server: &Server, key: &str) -> Result<Protocol, DBError> {
|
||||
match server.storage.ttl(key) {
|
||||
match server.current_storage()?.ttl(key) {
|
||||
Ok(ttl) => Ok(Protocol::SimpleString(ttl.to_string())),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
||||
}
|
||||
|
||||
async fn exists_cmd(server: &Server, key: &str) -> Result<Protocol, DBError> {
|
||||
match server.storage.exists(key) {
|
||||
match server.current_storage()?.exists(key) {
|
||||
Ok(exists) => Ok(Protocol::SimpleString(if exists { "1" } else { "0" }.to_string())),
|
||||
Err(e) => Ok(Protocol::err(&e.0)),
|
||||
}
|
73
herodb/src/crypto.rs
Normal file
@@ -0,0 +1,73 @@
|
||||
use chacha20poly1305::{
|
||||
aead::{Aead, KeyInit, OsRng},
|
||||
XChaCha20Poly1305, XNonce,
|
||||
};
|
||||
use rand::RngCore;
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
const VERSION: u8 = 1;
|
||||
const NONCE_LEN: usize = 24;
|
||||
const TAG_LEN: usize = 16;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum CryptoError {
|
||||
Format, // wrong length / header
|
||||
Version(u8), // unknown version
|
||||
Decrypt, // wrong key or corrupted data
|
||||
}
|
||||
|
||||
impl From<CryptoError> for crate::error::DBError {
|
||||
fn from(e: CryptoError) -> Self {
|
||||
crate::error::DBError(format!("Crypto error: {:?}", e))
|
||||
}
|
||||
}
|
||||
|
||||
/// Super-simple factory: new(secret) + encrypt(bytes) + decrypt(bytes)
|
||||
pub struct CryptoFactory {
|
||||
key: chacha20poly1305::Key,
|
||||
}
|
||||
|
||||
impl CryptoFactory {
|
||||
/// Accepts any secret bytes; turns them into a 32-byte key (SHA-256).
|
||||
pub fn new<S: AsRef<[u8]>>(secret: S) -> Self {
|
||||
let mut h = Sha256::new();
|
||||
h.update(b"xchacha20poly1305-factory:v1"); // domain separation
|
||||
h.update(secret.as_ref());
|
||||
let digest = h.finalize(); // 32 bytes
|
||||
let key = chacha20poly1305::Key::from_slice(&digest).to_owned();
|
||||
Self { key }
|
||||
}
|
||||
|
||||
/// Output layout: [version:1][nonce:24][ciphertext||tag]
|
||||
pub fn encrypt(&self, plaintext: &[u8]) -> Vec<u8> {
|
||||
let cipher = XChaCha20Poly1305::new(&self.key);
|
||||
|
||||
let mut nonce_bytes = [0u8; NONCE_LEN];
|
||||
OsRng.fill_bytes(&mut nonce_bytes);
|
||||
let nonce = XNonce::from_slice(&nonce_bytes);
|
||||
|
||||
let mut out = Vec::with_capacity(1 + NONCE_LEN + plaintext.len() + TAG_LEN);
|
||||
out.push(VERSION);
|
||||
out.extend_from_slice(&nonce_bytes);
|
||||
|
||||
let ct = cipher.encrypt(nonce, plaintext).expect("encrypt");
|
||||
out.extend_from_slice(&ct);
|
||||
out
|
||||
}
|
||||
|
||||
pub fn decrypt(&self, blob: &[u8]) -> Result<Vec<u8>, CryptoError> {
|
||||
if blob.len() < 1 + NONCE_LEN + TAG_LEN {
|
||||
return Err(CryptoError::Format);
|
||||
}
|
||||
let ver = blob[0];
|
||||
if ver != VERSION {
|
||||
return Err(CryptoError::Version(ver));
|
||||
}
|
||||
|
||||
let nonce = XNonce::from_slice(&blob[1..1 + NONCE_LEN]);
|
||||
let ct = &blob[1 + NONCE_LEN..];
|
||||
|
||||
let cipher = XChaCha20Poly1305::new(&self.key);
|
||||
cipher.decrypt(nonce, ct).map_err(|_| CryptoError::Decrypt)
|
||||
}
|
||||
}
|
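A minimal round-trip sketch for the factory above, assuming it is exposed as `herodb::crypto::CryptoFactory` (the module path is an assumption):

```rust
use herodb::crypto::CryptoFactory;

fn main() {
    // Any secret works; it is hashed to a 32-byte key internally (SHA-256 with a domain tag).
    let factory = CryptoFactory::new("my-master-key");

    // encrypt() produces [version:1][nonce:24][ciphertext || 16-byte tag],
    // so the blob is always plaintext.len() + 41 bytes.
    let blob = factory.encrypt(b"hello");
    assert_eq!(blob.len(), 5 + 1 + 24 + 16);

    // decrypt() returns CryptoError::Decrypt for a wrong key or tampered data.
    let plain = factory.decrypt(&blob).expect("same key must round-trip");
    assert_eq!(plain, b"hello".to_vec());
}
```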
@@ -80,3 +80,15 @@ impl From<tokio::sync::mpsc::error::SendError<()>> for DBError {
|
||||
DBError(item.to_string().clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<serde_json::Error> for DBError {
|
||||
fn from(item: serde_json::Error) -> Self {
|
||||
DBError(item.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<chacha20poly1305::Error> for DBError {
|
||||
fn from(item: chacha20poly1305::Error) -> Self {
|
||||
DBError(item.to_string())
|
||||
}
|
||||
}
|
@@ -1,4 +1,6 @@
|
||||
pub mod age; // NEW
|
||||
pub mod cmd;
|
||||
pub mod crypto;
|
||||
pub mod error;
|
||||
pub mod options;
|
||||
pub mod protocol;
|
@@ -2,7 +2,7 @@
|
||||
|
||||
use tokio::net::TcpListener;
|
||||
|
||||
use redis_rs::server;
|
||||
use herodb::server;
|
||||
|
||||
use clap::Parser;
|
||||
|
||||
@@ -14,7 +14,6 @@ struct Args {
|
||||
#[arg(long)]
|
||||
dir: String,
|
||||
|
||||
|
||||
/// The port of the Redis server, default is 6379 if not specified
|
||||
#[arg(long)]
|
||||
port: Option<u16>,
|
||||
@@ -22,6 +21,15 @@ struct Args {
|
||||
/// Enable debug mode
|
||||
#[arg(long)]
|
||||
debug: bool,
|
||||
|
||||
|
||||
/// Master encryption key for encrypted databases
|
||||
#[arg(long)]
|
||||
encryption_key: Option<String>,
|
||||
|
||||
/// Encrypt the database
|
||||
#[arg(long)]
|
||||
encrypt: bool,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
@@ -37,10 +45,12 @@ async fn main() {
|
||||
.unwrap();
|
||||
|
||||
// new DB option
|
||||
let option = redis_rs::options::DBOption {
|
||||
let option = herodb::options::DBOption {
|
||||
dir: args.dir,
|
||||
port,
|
||||
debug: args.debug,
|
||||
encryption_key: args.encryption_key,
|
||||
encrypt: args.encrypt,
|
||||
};
|
||||
|
||||
// new server
|
@@ -3,4 +3,6 @@ pub struct DBOption {
|
||||
pub dir: String,
|
||||
pub port: u16,
|
||||
pub debug: bool,
|
||||
pub encrypt: bool,
|
||||
pub encryption_key: Option<String>, // Master encryption key
|
||||
}
|
@@ -8,6 +8,7 @@ pub enum Protocol {
|
||||
BulkString(String),
|
||||
Null,
|
||||
Array(Vec<Protocol>),
|
||||
Error(String), // NEW
|
||||
}
|
||||
|
||||
impl fmt::Display for Protocol {
|
||||
@@ -45,7 +46,7 @@ impl Protocol {
|
||||
|
||||
#[inline]
|
||||
pub fn err(msg: &str) -> Self {
|
||||
Protocol::SimpleString(msg.to_string())
|
||||
Protocol::Error(msg.to_string())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@@ -69,6 +70,7 @@ impl Protocol {
|
||||
Protocol::BulkString(s) => s.to_string(),
|
||||
Protocol::Null => "".to_string(),
|
||||
Protocol::Array(s) => s.iter().map(|x| x.decode()).collect::<Vec<_>>().join(" "),
|
||||
Protocol::Error(s) => s.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -77,14 +79,10 @@ impl Protocol {
|
||||
Protocol::SimpleString(s) => format!("+{}\r\n", s),
|
||||
Protocol::BulkString(s) => format!("${}\r\n{}\r\n", s.len(), s),
|
||||
Protocol::Array(ss) => {
|
||||
format!("*{}\r\n", ss.len())
|
||||
+ ss.iter()
|
||||
.map(|x| x.encode())
|
||||
.collect::<Vec<_>>()
|
||||
.join("")
|
||||
.as_str()
|
||||
format!("*{}\r\n", ss.len()) + &ss.iter().map(|x| x.encode()).collect::<String>()
|
||||
}
|
||||
Protocol::Null => "$-1\r\n".to_string(),
|
||||
Protocol::Error(s) => format!("-{}\r\n", s), // proper RESP error
|
||||
}
|
||||
}
|
||||
|
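For reference, the wire bytes the reworked `encode()` produces for the variants touched here, assuming the enum is exported as `herodb::protocol::Protocol`:

```rust
use herodb::protocol::Protocol;

fn main() {
    assert_eq!(Protocol::SimpleString("OK".to_string()).encode(), "+OK\r\n");
    assert_eq!(Protocol::BulkString("foo".to_string()).encode(), "$3\r\nfoo\r\n");
    assert_eq!(Protocol::Null.encode(), "$-1\r\n");

    // The new Error variant renders as a real RESP error instead of a simple string.
    assert_eq!(
        Protocol::err("ERR EXEC without MULTI").encode(),
        "-ERR EXEC without MULTI\r\n"
    );

    // Arrays prefix the element count, then concatenate the encoded elements.
    let arr = Protocol::Array(vec![Protocol::BulkString("a".to_string()), Protocol::Null]);
    assert_eq!(arr.encode(), "*2\r\n$1\r\na\r\n$-1\r\n");
}
```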
@@ -1,5 +1,5 @@
|
||||
use core::str;
|
||||
use std::path::PathBuf;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use tokio::io::AsyncReadExt;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
@@ -12,33 +12,65 @@ use crate::storage::Storage;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Server {
|
||||
pub storage: Arc<Storage>,
|
||||
pub db_cache: std::sync::Arc<std::sync::RwLock<HashMap<u64, Arc<Storage>>>>,
|
||||
pub option: options::DBOption,
|
||||
pub client_name: Option<String>,
|
||||
pub selected_db: u64, // Changed from usize to u64
|
||||
pub queued_cmd: Option<Vec<(Cmd, Protocol)>>,
|
||||
}
|
||||
|
||||
impl Server {
|
||||
pub async fn new(option: options::DBOption) -> Self {
|
||||
// Create database file path with fixed filename
|
||||
let db_file_path = PathBuf::from(option.dir.clone()).join("herodb.redb");
|
||||
println!("will open db file path: {}", db_file_path.display());
|
||||
|
||||
// Initialize storage with redb
|
||||
let storage = Storage::new(db_file_path).expect("Failed to initialize storage");
|
||||
|
||||
Server {
|
||||
storage: Arc::new(storage),
|
||||
db_cache: Arc::new(std::sync::RwLock::new(HashMap::new())),
|
||||
option,
|
||||
client_name: None,
|
||||
selected_db: 0,
|
||||
queued_cmd: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn current_storage(&self) -> Result<Arc<Storage>, DBError> {
|
||||
let mut cache = self.db_cache.write().unwrap();
|
||||
|
||||
if let Some(storage) = cache.get(&self.selected_db) {
|
||||
return Ok(storage.clone());
|
||||
}
|
||||
|
||||
|
||||
// Create new database file
|
||||
let db_file_path = std::path::PathBuf::from(self.option.dir.clone())
|
||||
.join(format!("{}.db", self.selected_db));
|
||||
|
||||
// Ensure the directory exists before creating the database file
|
||||
if let Some(parent_dir) = db_file_path.parent() {
|
||||
std::fs::create_dir_all(parent_dir).map_err(|e| {
|
||||
DBError(format!("Failed to create directory {}: {}", parent_dir.display(), e))
|
||||
})?;
|
||||
}
|
||||
|
||||
println!("Creating new db file: {}", db_file_path.display());
|
||||
|
||||
let storage = Arc::new(Storage::new(
|
||||
db_file_path,
|
||||
self.should_encrypt_db(self.selected_db),
|
||||
self.option.encryption_key.as_deref()
|
||||
)?);
|
||||
|
||||
cache.insert(self.selected_db, storage.clone());
|
||||
Ok(storage)
|
||||
}
|
||||
|
||||
fn should_encrypt_db(&self, db_index: u64) -> bool {
|
||||
// DB 0-9 are non-encrypted, DB 10+ are encrypted
|
||||
self.option.encrypt && db_index >= 10
|
||||
}
|
||||
|
||||
pub async fn handle(
|
||||
&mut self,
|
||||
mut stream: tokio::net::TcpStream,
|
||||
) -> Result<(), DBError> {
|
||||
let mut buf = [0; 512];
|
||||
let mut queued_cmd: Option<Vec<(Cmd, Protocol)>> = None;
|
||||
|
||||
loop {
|
||||
let len = match stream.read(&mut buf).await {
|
||||
@@ -73,16 +105,21 @@ impl Server {
|
||||
// Check if this is a QUIT command before processing
|
||||
let is_quit = matches!(cmd, Cmd::Quit);
|
||||
|
||||
let res = cmd
|
||||
.run(&mut self.clone(), protocol.clone(), &mut queued_cmd)
|
||||
.await
|
||||
.unwrap_or(Protocol::err("unknown cmd from server"));
|
||||
let res = match cmd.run(self).await {
|
||||
Ok(p) => p,
|
||||
Err(e) => {
|
||||
if self.option.debug {
|
||||
eprintln!("[run error] {:?}", e);
|
||||
}
|
||||
Protocol::err(&format!("ERR {}", e.0))
|
||||
}
|
||||
};
|
||||
|
||||
if self.option.debug {
|
||||
println!("\x1b[34;1mqueued cmd {:?}\x1b[0m", queued_cmd);
|
||||
println!("\x1b[34;1mqueued cmd {:?}\x1b[0m", self.queued_cmd);
|
||||
println!("\x1b[32;1mgoing to send response {}\x1b[0m", res.encode());
|
||||
} else {
|
||||
print!("queued cmd {:?}", queued_cmd);
|
||||
print!("queued cmd {:?}", self.queued_cmd);
|
||||
println!("going to send response {}", res.encode());
|
||||
}
|
||||
|
||||
@@ -95,6 +132,5 @@ impl Server {
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
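A standalone sketch of the database-selection policy introduced above: each selected DB maps to `{dir}/{index}.db`, and with `--encrypt` only indexes 10 and up are encrypted. The helper below is hypothetical and only mirrors `should_encrypt_db` and the file naming:

```rust
use std::path::PathBuf;

// Mirrors the rule in the server: with --encrypt, databases 0-9 stay
// plaintext and databases 10+ are opened with encryption.
fn db_file(dir: &str, selected_db: u64, encrypt_flag: bool) -> (PathBuf, bool) {
    let path = PathBuf::from(dir).join(format!("{}.db", selected_db));
    let encrypted = encrypt_flag && selected_db >= 10;
    (path, encrypted)
}

fn main() {
    assert_eq!(db_file("/data", 0, true), (PathBuf::from("/data/0.db"), false));
    assert_eq!(db_file("/data", 10, true), (PathBuf::from("/data/10.db"), true));
    assert_eq!(db_file("/data", 10, false), (PathBuf::from("/data/10.db"), false));
}
```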
126
herodb/src/storage/mod.rs
Normal file
@@ -0,0 +1,126 @@
|
||||
use std::{
|
||||
path::Path,
|
||||
time::{SystemTime, UNIX_EPOCH},
|
||||
};
|
||||
|
||||
use redb::{Database, TableDefinition};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::crypto::CryptoFactory;
|
||||
use crate::error::DBError;
|
||||
|
||||
// Re-export modules
|
||||
mod storage_basic;
|
||||
mod storage_hset;
|
||||
mod storage_lists;
|
||||
mod storage_extra;
|
||||
|
||||
// Re-export implementations
|
||||
// Note: These imports are used by the impl blocks in the submodules
|
||||
// The compiler shows them as unused because they're not directly used in this file
|
||||
// but they're needed for the Storage struct methods to be available
|
||||
pub use storage_extra::*;
|
||||
|
||||
// Table definitions for different Redis data types
|
||||
const TYPES_TABLE: TableDefinition<&str, &str> = TableDefinition::new("types");
|
||||
const STRINGS_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("strings");
|
||||
const HASHES_TABLE: TableDefinition<(&str, &str), &[u8]> = TableDefinition::new("hashes");
|
||||
const LISTS_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("lists");
|
||||
const STREAMS_META_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("streams_meta");
|
||||
const STREAMS_DATA_TABLE: TableDefinition<(&str, &str), &[u8]> = TableDefinition::new("streams_data");
|
||||
const ENCRYPTED_TABLE: TableDefinition<&str, u8> = TableDefinition::new("encrypted");
|
||||
const EXPIRATION_TABLE: TableDefinition<&str, u64> = TableDefinition::new("expiration");
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
pub struct StreamEntry {
|
||||
pub fields: Vec<(String, String)>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
pub struct ListValue {
|
||||
pub elements: Vec<String>,
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn now_in_millis() -> u128 {
|
||||
let start = SystemTime::now();
|
||||
let duration_since_epoch = start.duration_since(UNIX_EPOCH).unwrap();
|
||||
duration_since_epoch.as_millis()
|
||||
}
|
||||
|
||||
pub struct Storage {
|
||||
db: Database,
|
||||
crypto: Option<CryptoFactory>,
|
||||
}
|
||||
|
||||
impl Storage {
|
||||
pub fn new(path: impl AsRef<Path>, should_encrypt: bool, master_key: Option<&str>) -> Result<Self, DBError> {
|
||||
let db = Database::create(path)?;
|
||||
|
||||
// Create tables if they don't exist
|
||||
let write_txn = db.begin_write()?;
|
||||
{
|
||||
let _ = write_txn.open_table(TYPES_TABLE)?;
|
||||
let _ = write_txn.open_table(STRINGS_TABLE)?;
|
||||
let _ = write_txn.open_table(HASHES_TABLE)?;
|
||||
let _ = write_txn.open_table(LISTS_TABLE)?;
|
||||
let _ = write_txn.open_table(STREAMS_META_TABLE)?;
|
||||
let _ = write_txn.open_table(STREAMS_DATA_TABLE)?;
|
||||
let _ = write_txn.open_table(ENCRYPTED_TABLE)?;
|
||||
let _ = write_txn.open_table(EXPIRATION_TABLE)?;
|
||||
}
|
||||
write_txn.commit()?;
|
||||
|
||||
// Check if database was previously encrypted
|
||||
let read_txn = db.begin_read()?;
|
||||
let encrypted_table = read_txn.open_table(ENCRYPTED_TABLE)?;
|
||||
let was_encrypted = encrypted_table.get("encrypted")?.map(|v| v.value() == 1).unwrap_or(false);
|
||||
drop(read_txn);
|
||||
|
||||
let crypto = if should_encrypt || was_encrypted {
|
||||
if let Some(key) = master_key {
|
||||
Some(CryptoFactory::new(key.as_bytes()))
|
||||
} else {
|
||||
return Err(DBError("Encryption requested but no master key provided".to_string()));
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// If we're enabling encryption for the first time, mark it
|
||||
if should_encrypt && !was_encrypted {
|
||||
let write_txn = db.begin_write()?;
|
||||
{
|
||||
let mut encrypted_table = write_txn.open_table(ENCRYPTED_TABLE)?;
|
||||
encrypted_table.insert("encrypted", &1u8)?;
|
||||
}
|
||||
write_txn.commit()?;
|
||||
}
|
||||
|
||||
Ok(Storage {
|
||||
db,
|
||||
crypto,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn is_encrypted(&self) -> bool {
|
||||
self.crypto.is_some()
|
||||
}
|
||||
|
||||
// Helper methods for encryption
|
||||
fn encrypt_if_needed(&self, data: &[u8]) -> Result<Vec<u8>, DBError> {
|
||||
if let Some(crypto) = &self.crypto {
|
||||
Ok(crypto.encrypt(data))
|
||||
} else {
|
||||
Ok(data.to_vec())
|
||||
}
|
||||
}
|
||||
|
||||
fn decrypt_if_needed(&self, data: &[u8]) -> Result<Vec<u8>, DBError> {
|
||||
if let Some(crypto) = &self.crypto {
|
||||
Ok(crypto.decrypt(data)?)
|
||||
} else {
|
||||
Ok(data.to_vec())
|
||||
}
|
||||
}
|
||||
}
|
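A usage sketch for the constructor above, assuming the type is reachable as `herodb::storage::Storage` and errors as `herodb::error::DBError` (both paths are assumptions). Once a file has been opened with encryption, the persisted `encrypted` marker makes the master key mandatory on later opens:

```rust
use herodb::storage::Storage;

fn main() -> Result<(), herodb::error::DBError> {
    // Plain database: no key needed now or later.
    let plain = Storage::new("/tmp/0.db", false, None)?;
    assert!(!plain.is_encrypted());

    // Encrypted database: the first open writes the "encrypted" marker,
    // so reopening this file without a master key would return an error.
    let secure = Storage::new("/tmp/10.db", true, Some("master-key"))?;
    assert!(secure.is_encrypted());
    Ok(())
}
```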
218
herodb/src/storage/storage_basic.rs
Normal file
@@ -0,0 +1,218 @@
|
||||
use redb::{ReadableTable};
|
||||
use crate::error::DBError;
|
||||
use super::*;
|
||||
|
||||
impl Storage {
|
||||
pub fn flushdb(&self) -> Result<(), DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
{
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let mut strings_table = write_txn.open_table(STRINGS_TABLE)?;
|
||||
let mut hashes_table = write_txn.open_table(HASHES_TABLE)?;
|
||||
let mut lists_table = write_txn.open_table(LISTS_TABLE)?;
|
||||
let mut streams_meta_table = write_txn.open_table(STREAMS_META_TABLE)?;
|
||||
let mut streams_data_table = write_txn.open_table(STREAMS_DATA_TABLE)?;
|
||||
let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?;
|
||||
|
||||
// inefficient, but there is no other way
|
||||
let keys: Vec<String> = types_table.iter()?.map(|item| item.unwrap().0.value().to_string()).collect();
|
||||
for key in keys {
|
||||
types_table.remove(key.as_str())?;
|
||||
}
|
||||
let keys: Vec<String> = strings_table.iter()?.map(|item| item.unwrap().0.value().to_string()).collect();
|
||||
for key in keys {
|
||||
strings_table.remove(key.as_str())?;
|
||||
}
|
||||
let keys: Vec<(String, String)> = hashes_table
|
||||
.iter()?
|
||||
.map(|item| {
|
||||
let binding = item.unwrap();
|
||||
let (k, f) = binding.0.value();
|
||||
(k.to_string(), f.to_string())
|
||||
})
|
||||
.collect();
|
||||
for (key, field) in keys {
|
||||
hashes_table.remove((key.as_str(), field.as_str()))?;
|
||||
}
|
||||
let keys: Vec<String> = lists_table.iter()?.map(|item| item.unwrap().0.value().to_string()).collect();
|
||||
for key in keys {
|
||||
lists_table.remove(key.as_str())?;
|
||||
}
|
||||
let keys: Vec<String> = streams_meta_table.iter()?.map(|item| item.unwrap().0.value().to_string()).collect();
|
||||
for key in keys {
|
||||
streams_meta_table.remove(key.as_str())?;
|
||||
}
|
||||
let keys: Vec<(String,String)> = streams_data_table.iter()?.map(|item| {
|
||||
let binding = item.unwrap();
|
||||
let (key, field) = binding.0.value();
|
||||
(key.to_string(), field.to_string())
|
||||
}).collect();
|
||||
for (key, field) in keys {
|
||||
streams_data_table.remove((key.as_str(), field.as_str()))?;
|
||||
}
|
||||
let keys: Vec<String> = expiration_table.iter()?.map(|item| item.unwrap().0.value().to_string()).collect();
|
||||
for key in keys {
|
||||
expiration_table.remove(key.as_str())?;
|
||||
}
|
||||
}
|
||||
write_txn.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_key_type(&self, key: &str) -> Result<Option<String>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
// Before returning type, check for expiration
|
||||
if let Some(type_val) = table.get(key)? {
|
||||
if type_val.value() == "string" {
|
||||
let expiration_table = read_txn.open_table(EXPIRATION_TABLE)?;
|
||||
if let Some(expires_at) = expiration_table.get(key)? {
|
||||
if now_in_millis() > expires_at.value() as u128 {
|
||||
// The key is expired, so it effectively has no type
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(Some(type_val.value().to_string()))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Value is encrypted/decrypted
|
||||
pub fn get(&self, key: &str) -> Result<Option<String>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
match types_table.get(key)? {
|
||||
Some(type_val) if type_val.value() == "string" => {
|
||||
// Check expiration first (unencrypted)
|
||||
let expiration_table = read_txn.open_table(EXPIRATION_TABLE)?;
|
||||
if let Some(expires_at) = expiration_table.get(key)? {
|
||||
if now_in_millis() > expires_at.value() as u128 {
|
||||
drop(read_txn);
|
||||
self.del(key.to_string())?;
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
|
||||
// Get and decrypt value
|
||||
let strings_table = read_txn.open_table(STRINGS_TABLE)?;
|
||||
match strings_table.get(key)? {
|
||||
Some(data) => {
|
||||
let decrypted = self.decrypt_if_needed(data.value())?;
|
||||
let value = String::from_utf8(decrypted)?;
|
||||
Ok(Some(value))
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
_ => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Value is encrypted before storage
|
||||
pub fn set(&self, key: String, value: String) -> Result<(), DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
|
||||
{
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
types_table.insert(key.as_str(), "string")?;
|
||||
|
||||
let mut strings_table = write_txn.open_table(STRINGS_TABLE)?;
|
||||
// Only encrypt the value, not expiration
|
||||
let encrypted = self.encrypt_if_needed(value.as_bytes())?;
|
||||
strings_table.insert(key.as_str(), encrypted.as_slice())?;
|
||||
|
||||
// Remove any existing expiration since this is a regular SET
|
||||
let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?;
|
||||
expiration_table.remove(key.as_str())?;
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Value is encrypted before storage
|
||||
pub fn setx(&self, key: String, value: String, expire_ms: u128) -> Result<(), DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
|
||||
{
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
types_table.insert(key.as_str(), "string")?;
|
||||
|
||||
let mut strings_table = write_txn.open_table(STRINGS_TABLE)?;
|
||||
// Only encrypt the value
|
||||
let encrypted = self.encrypt_if_needed(value.as_bytes())?;
|
||||
strings_table.insert(key.as_str(), encrypted.as_slice())?;
|
||||
|
||||
// Store expiration separately (unencrypted)
|
||||
let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?;
|
||||
let expires_at = expire_ms + now_in_millis();
|
||||
expiration_table.insert(key.as_str(), &(expires_at as u64))?;
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn del(&self, key: String) -> Result<(), DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
|
||||
{
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let mut strings_table = write_txn.open_table(STRINGS_TABLE)?;
|
||||
let mut hashes_table: redb::Table<(&str, &str), &[u8]> = write_txn.open_table(HASHES_TABLE)?;
|
||||
let mut lists_table = write_txn.open_table(LISTS_TABLE)?;
|
||||
|
||||
// Remove from type table
|
||||
types_table.remove(key.as_str())?;
|
||||
|
||||
// Remove from strings table
|
||||
strings_table.remove(key.as_str())?;
|
||||
|
||||
// Remove all hash fields for this key
|
||||
let mut to_remove = Vec::new();
|
||||
let mut iter = hashes_table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let entry = entry?;
|
||||
let (hash_key, field) = entry.0.value();
|
||||
if hash_key == key.as_str() {
|
||||
to_remove.push((hash_key.to_string(), field.to_string()));
|
||||
}
|
||||
}
|
||||
drop(iter);
|
||||
|
||||
for (hash_key, field) in to_remove {
|
||||
hashes_table.remove((hash_key.as_str(), field.as_str()))?;
|
||||
}
|
||||
|
||||
// Remove from lists table
|
||||
lists_table.remove(key.as_str())?;
|
||||
|
||||
// Also remove expiration
|
||||
let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?;
|
||||
expiration_table.remove(key.as_str())?;
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn keys(&self, pattern: &str) -> Result<Vec<String>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
let mut keys = Vec::new();
|
||||
let mut iter = table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let key = entry?.0.value().to_string();
|
||||
if pattern == "*" || super::storage_extra::glob_match(pattern, &key) {
|
||||
keys.push(key);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(keys)
|
||||
}
|
||||
}
|
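A short sketch of the string API in this file, under the same assumed paths as the earlier sketches; expirations live in a separate unencrypted table and are enforced lazily on read:

```rust
use herodb::storage::Storage;

fn main() -> Result<(), herodb::error::DBError> {
    let db = Storage::new("/tmp/basic.db", false, None)?;

    // Plain SET removes any previous expiration for the key.
    db.set("greeting".to_string(), "hello".to_string())?;
    assert_eq!(db.get("greeting")?, Some("hello".to_string()));

    // SETX stores an absolute expiry (now + 50 ms); once it passes,
    // get() deletes the key and reports None.
    db.setx("temp".to_string(), "value".to_string(), 50)?;
    std::thread::sleep(std::time::Duration::from_millis(60));
    assert_eq!(db.get("temp")?, None);

    db.del("greeting".to_string())?;
    Ok(())
}
```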
168
herodb/src/storage/storage_extra.rs
Normal file
@@ -0,0 +1,168 @@
|
||||
use redb::{ReadableTable};
|
||||
use crate::error::DBError;
|
||||
use super::*;
|
||||
|
||||
impl Storage {
|
||||
// ✅ ENCRYPTION APPLIED: Values are decrypted after retrieval
|
||||
pub fn scan(&self, cursor: u64, pattern: Option<&str>, count: Option<u64>) -> Result<(u64, Vec<(String, String)>), DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
let strings_table = read_txn.open_table(STRINGS_TABLE)?;
|
||||
|
||||
let mut result = Vec::new();
|
||||
let mut current_cursor = 0u64;
|
||||
let limit = count.unwrap_or(10) as usize;
|
||||
|
||||
let mut iter = types_table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let entry = entry?;
|
||||
let key = entry.0.value().to_string();
|
||||
let key_type = entry.1.value().to_string();
|
||||
|
||||
if current_cursor >= cursor {
|
||||
// Apply pattern matching if specified
|
||||
let matches = if let Some(pat) = pattern {
|
||||
glob_match(pat, &key)
|
||||
} else {
|
||||
true
|
||||
};
|
||||
|
||||
if matches {
|
||||
// For scan, we return key-value pairs for string types
|
||||
if key_type == "string" {
|
||||
if let Some(data) = strings_table.get(key.as_str())? {
|
||||
let decrypted = self.decrypt_if_needed(data.value())?;
|
||||
let value = String::from_utf8(decrypted)?;
|
||||
result.push((key, value));
|
||||
} else {
|
||||
result.push((key, String::new()));
|
||||
}
|
||||
} else {
|
||||
// For non-string types, just return the key with type as value
|
||||
result.push((key, key_type));
|
||||
}
|
||||
|
||||
if result.len() >= limit {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
current_cursor += 1;
|
||||
}
|
||||
|
||||
let next_cursor = if result.len() < limit { 0 } else { current_cursor };
|
||||
Ok((next_cursor, result))
|
||||
}
|
||||
|
||||
pub fn ttl(&self, key: &str) -> Result<i64, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
match types_table.get(key)? {
|
||||
Some(type_val) if type_val.value() == "string" => {
|
||||
let expiration_table = read_txn.open_table(EXPIRATION_TABLE)?;
|
||||
match expiration_table.get(key)? {
|
||||
Some(expires_at) => {
|
||||
let now = now_in_millis();
|
||||
let expires_at_ms = expires_at.value() as u128;
|
||||
if now >= expires_at_ms {
|
||||
Ok(-2) // Key has expired
|
||||
} else {
|
||||
Ok(((expires_at_ms - now) / 1000) as i64) // TTL in seconds
|
||||
}
|
||||
}
|
||||
None => Ok(-1), // Key exists but has no expiration
|
||||
}
|
||||
}
|
||||
Some(_) => Ok(-1), // Key exists but is not a string (no expiration support for other types)
|
||||
None => Ok(-2), // Key does not exist
|
||||
}
|
||||
}
|
||||
|
||||
pub fn exists(&self, key: &str) -> Result<bool, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
match types_table.get(key)? {
|
||||
Some(type_val) if type_val.value() == "string" => {
|
||||
// Check if string key has expired
|
||||
let expiration_table = read_txn.open_table(EXPIRATION_TABLE)?;
|
||||
if let Some(expires_at) = expiration_table.get(key)? {
|
||||
if now_in_millis() > expires_at.value() as u128 {
|
||||
return Ok(false); // Key has expired
|
||||
}
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
Some(_) => Ok(true), // Key exists and is not a string
|
||||
None => Ok(false), // Key does not exist
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Utility function for glob pattern matching
|
||||
pub fn glob_match(pattern: &str, text: &str) -> bool {
|
||||
if pattern == "*" {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Simple glob matching - supports * and ? wildcards
|
||||
let pattern_chars: Vec<char> = pattern.chars().collect();
|
||||
let text_chars: Vec<char> = text.chars().collect();
|
||||
|
||||
fn match_recursive(pattern: &[char], text: &[char], pi: usize, ti: usize) -> bool {
|
||||
if pi >= pattern.len() {
|
||||
return ti >= text.len();
|
||||
}
|
||||
|
||||
if ti >= text.len() {
|
||||
// Check if remaining pattern is all '*'
|
||||
return pattern[pi..].iter().all(|&c| c == '*');
|
||||
}
|
||||
|
||||
match pattern[pi] {
|
||||
'*' => {
|
||||
// Try matching zero or more characters
|
||||
for i in ti..=text.len() {
|
||||
if match_recursive(pattern, text, pi + 1, i) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
'?' => {
|
||||
// Match exactly one character
|
||||
match_recursive(pattern, text, pi + 1, ti + 1)
|
||||
}
|
||||
c => {
|
||||
// Match exact character
|
||||
if text[ti] == c {
|
||||
match_recursive(pattern, text, pi + 1, ti + 1)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
match_recursive(&pattern_chars, &text_chars, 0, 0)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_glob_match() {
|
||||
assert!(glob_match("*", "anything"));
|
||||
assert!(glob_match("hello", "hello"));
|
||||
assert!(!glob_match("hello", "world"));
|
||||
assert!(glob_match("h*o", "hello"));
|
||||
assert!(glob_match("h*o", "ho"));
|
||||
assert!(!glob_match("h*o", "hi"));
|
||||
assert!(glob_match("h?llo", "hello"));
|
||||
assert!(!glob_match("h?llo", "hllo"));
|
||||
assert!(glob_match("*test*", "this_is_a_test_string"));
|
||||
assert!(!glob_match("*test*", "this_is_a_string"));
|
||||
}
|
||||
}
|
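A sketch of driving the cursor-based `scan` above until it reports completion (a returned cursor of `0`), plus the TTL convention (`-2` missing or expired, `-1` no expiry), under the same assumed paths:

```rust
use std::collections::HashSet;
use herodb::storage::Storage;

fn main() -> Result<(), herodb::error::DBError> {
    let db = Storage::new("/tmp/extra.db", false, None)?;
    db.flushdb()?;
    for i in 0..25 {
        db.set(format!("key:{}", i), i.to_string())?;
    }

    // Page through keys matching "key:*", up to 10 per call; a returned
    // cursor of 0 signals the end of the iteration.
    let mut cursor = 0u64;
    let mut keys = HashSet::new();
    loop {
        let (next, page) = db.scan(cursor, Some("key:*"), Some(10))?;
        keys.extend(page.into_iter().map(|(k, _v)| k));
        if next == 0 {
            break;
        }
        cursor = next;
    }
    assert_eq!(keys.len(), 25);

    assert_eq!(db.ttl("key:0")?, -1);   // exists, no expiration
    assert_eq!(db.ttl("missing")?, -2); // never existed (or already expired)
    Ok(())
}
```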
389
herodb/src/storage/storage_hset.rs
Normal file
@@ -0,0 +1,389 @@
|
||||
use redb::{ReadableTable};
|
||||
use crate::error::DBError;
|
||||
use super::*;
|
||||
|
||||
impl Storage {
|
||||
// ✅ ENCRYPTION APPLIED: Values are encrypted before storage
|
||||
pub fn hset(&self, key: &str, pairs: Vec<(String, String)>) -> Result<i64, DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
let mut new_fields = 0i64;
|
||||
|
||||
{
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let mut hashes_table = write_txn.open_table(HASHES_TABLE)?;
|
||||
|
||||
let key_type = {
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") | None => { // Proceed if hash or new key
|
||||
// Set the type to hash (only if new key or existing hash)
|
||||
types_table.insert(key, "hash")?;
|
||||
|
||||
for (field, value) in pairs {
|
||||
// Check if field already exists
|
||||
let exists = hashes_table.get((key, field.as_str()))?.is_some();
|
||||
|
||||
// Encrypt the value before storing
|
||||
let encrypted = self.encrypt_if_needed(value.as_bytes())?;
|
||||
hashes_table.insert((key, field.as_str()), encrypted.as_slice())?;
|
||||
|
||||
if !exists {
|
||||
new_fields += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
Some(_) => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
}
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(new_fields)
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Value is decrypted after retrieval
|
||||
pub fn hget(&self, key: &str, field: &str) -> Result<Option<String>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
let key_type = types_table.get(key)?.map(|v| v.value().to_string());
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") => {
|
||||
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
|
||||
match hashes_table.get((key, field))? {
|
||||
Some(data) => {
|
||||
let decrypted = self.decrypt_if_needed(data.value())?;
|
||||
let value = String::from_utf8(decrypted)?;
|
||||
Ok(Some(value))
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: All values are decrypted after retrieval
|
||||
pub fn hgetall(&self, key: &str) -> Result<Vec<(String, String)>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
let key_type = {
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") => {
|
||||
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
|
||||
let mut result = Vec::new();
|
||||
|
||||
let mut iter = hashes_table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let entry = entry?;
|
||||
let (hash_key, field) = entry.0.value();
|
||||
if hash_key == key {
|
||||
let decrypted = self.decrypt_if_needed(entry.1.value())?;
|
||||
let value = String::from_utf8(decrypted)?;
|
||||
result.push((field.to_string(), value));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
None => Ok(Vec::new()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn hdel(&self, key: &str, fields: Vec<String>) -> Result<i64, DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
let mut deleted = 0i64;
|
||||
|
||||
// First check if key exists and is a hash
|
||||
let key_type = {
|
||||
let types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") => {
|
||||
let mut hashes_table = write_txn.open_table(HASHES_TABLE)?;
|
||||
|
||||
for field in fields {
|
||||
if hashes_table.remove((key, field.as_str()))?.is_some() {
|
||||
deleted += 1;
|
||||
}
|
||||
}
|
||||
|
||||
// Check if hash is now empty and remove type if so
|
||||
let mut has_fields = false;
|
||||
let mut iter = hashes_table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let entry = entry?;
|
||||
let (hash_key, _) = entry.0.value();
|
||||
if hash_key == key {
|
||||
has_fields = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
drop(iter);
|
||||
|
||||
if !has_fields {
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
types_table.remove(key)?;
|
||||
}
|
||||
}
|
||||
Some(_) => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
None => {} // Key does not exist, nothing to delete, return 0 deleted
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(deleted)
|
||||
}
|
||||
|
||||
pub fn hexists(&self, key: &str, field: &str) -> Result<bool, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
let key_type = {
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") => {
|
||||
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
|
||||
Ok(hashes_table.get((key, field))?.is_some())
|
||||
}
|
||||
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
None => Ok(false),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn hkeys(&self, key: &str) -> Result<Vec<String>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
let key_type = {
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") => {
|
||||
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
|
||||
let mut result = Vec::new();
|
||||
|
||||
let mut iter = hashes_table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let entry = entry?;
|
||||
let (hash_key, field) = entry.0.value();
|
||||
if hash_key == key {
|
||||
result.push(field.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
None => Ok(Vec::new()),
|
||||
}
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: All values are decrypted after retrieval
|
||||
pub fn hvals(&self, key: &str) -> Result<Vec<String>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
let key_type = {
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") => {
|
||||
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
|
||||
let mut result = Vec::new();
|
||||
|
||||
let mut iter = hashes_table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let entry = entry?;
|
||||
let (hash_key, _) = entry.0.value();
|
||||
if hash_key == key {
|
||||
let decrypted = self.decrypt_if_needed(entry.1.value())?;
|
||||
let value = String::from_utf8(decrypted)?;
|
||||
result.push(value);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
None => Ok(Vec::new()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn hlen(&self, key: &str) -> Result<i64, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
let key_type = {
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") => {
|
||||
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
|
||||
let mut count = 0i64;
|
||||
|
||||
let mut iter = hashes_table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let entry = entry?;
|
||||
let (hash_key, _) = entry.0.value();
|
||||
if hash_key == key {
|
||||
count += 1;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(count)
|
||||
}
|
||||
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
None => Ok(0),
|
||||
}
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Values are decrypted after retrieval
|
||||
pub fn hmget(&self, key: &str, fields: Vec<String>) -> Result<Vec<Option<String>>, DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
let key_type = {
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") => {
|
||||
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
|
||||
let mut result = Vec::new();
|
||||
|
||||
for field in fields {
|
||||
match hashes_table.get((key, field.as_str()))? {
|
||||
Some(data) => {
|
||||
let decrypted = self.decrypt_if_needed(data.value())?;
|
||||
let value = String::from_utf8(decrypted)?;
|
||||
result.push(Some(value));
|
||||
}
|
||||
None => result.push(None),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
None => Ok(fields.into_iter().map(|_| None).collect()),
|
||||
}
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Value is encrypted before storage
|
||||
pub fn hsetnx(&self, key: &str, field: &str, value: &str) -> Result<bool, DBError> {
|
||||
let write_txn = self.db.begin_write()?;
|
||||
let mut result = false;
|
||||
|
||||
{
|
||||
let mut types_table = write_txn.open_table(TYPES_TABLE)?;
|
||||
let mut hashes_table = write_txn.open_table(HASHES_TABLE)?;
|
||||
|
||||
let key_type = {
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") | None => { // Proceed if hash or new key
|
||||
// Check if field already exists
|
||||
if hashes_table.get((key, field))?.is_none() {
|
||||
// Set the type to hash (only if new key or existing hash)
|
||||
types_table.insert(key, "hash")?;
|
||||
|
||||
// Encrypt the value before storing
|
||||
let encrypted = self.encrypt_if_needed(value.as_bytes())?;
|
||||
hashes_table.insert((key, field), encrypted.as_slice())?;
|
||||
result = true;
|
||||
}
|
||||
}
|
||||
Some(_) => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
}
|
||||
}
|
||||
|
||||
write_txn.commit()?;
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
// ✅ ENCRYPTION APPLIED: Values are decrypted after retrieval
|
||||
pub fn hscan(&self, key: &str, cursor: u64, pattern: Option<&str>, count: Option<u64>) -> Result<(u64, Vec<(String, String)>), DBError> {
|
||||
let read_txn = self.db.begin_read()?;
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
|
||||
let types_table = read_txn.open_table(TYPES_TABLE)?;
|
||||
let key_type = {
|
||||
let access_guard = types_table.get(key)?;
|
||||
access_guard.map(|v| v.value().to_string())
|
||||
};
|
||||
|
||||
match key_type.as_deref() {
|
||||
Some("hash") => {
|
||||
let hashes_table = read_txn.open_table(HASHES_TABLE)?;
|
||||
let mut result = Vec::new();
|
||||
let mut current_cursor = 0u64;
|
||||
let limit = count.unwrap_or(10) as usize;
|
||||
|
||||
let mut iter = hashes_table.iter()?;
|
||||
while let Some(entry) = iter.next() {
|
||||
let entry = entry?;
|
||||
let (hash_key, field) = entry.0.value();
|
||||
|
||||
if hash_key == key {
|
||||
if current_cursor >= cursor {
|
||||
let field_str = field.to_string();
|
||||
|
||||
// Apply pattern matching if specified
|
||||
let matches = if let Some(pat) = pattern {
|
||||
super::storage_extra::glob_match(pat, &field_str)
|
||||
} else {
|
||||
true
|
||||
};
|
||||
|
||||
if matches {
|
||||
let decrypted = self.decrypt_if_needed(entry.1.value())?;
|
||||
let value = String::from_utf8(decrypted)?;
|
||||
result.push((field_str, value));
|
||||
|
||||
if result.len() >= limit {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
current_cursor += 1;
|
||||
}
|
||||
}
|
||||
|
||||
let next_cursor = if result.len() < limit { 0 } else { current_cursor };
|
||||
Ok((next_cursor, result))
|
||||
}
|
||||
Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())),
|
||||
None => Ok((0, Vec::new())),
|
||||
}
|
||||
}
|
||||
}
|
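A sketch of the hash API in this file, under the same assumed paths; `hset` reports how many fields were newly created, and deleting the last field drops the key's type entry:

```rust
use herodb::storage::Storage;

fn main() -> Result<(), herodb::error::DBError> {
    let db = Storage::new("/tmp/hash.db", false, None)?;
    db.flushdb()?;

    // HSET returns the number of *new* fields written.
    let added = db.hset("user:1", vec![
        ("name".to_string(), "alice".to_string()),
        ("age".to_string(), "30".to_string()),
    ])?;
    assert_eq!(added, 2);

    assert_eq!(db.hget("user:1", "name")?, Some("alice".to_string()));
    assert_eq!(db.hlen("user:1")?, 2);

    // HSETNX only writes when the field is absent.
    assert!(!db.hsetnx("user:1", "name", "bob")?);

    // Removing every field also removes the key's "hash" type entry.
    assert_eq!(db.hdel("user:1", vec!["name".to_string(), "age".to_string()])?, 2);
    assert!(!db.hexists("user:1", "name")?);
    Ok(())
}
```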
403
herodb/src/storage/storage_lists.rs
Normal file
@@ -0,0 +1,403 @@
use redb::{ReadableTable};
use crate::error::DBError;
use super::*;

impl Storage {
    // ✅ ENCRYPTION APPLIED: Elements are encrypted before storage
    pub fn lpush(&self, key: &str, elements: Vec<String>) -> Result<i64, DBError> {
        let write_txn = self.db.begin_write()?;
        let mut _length = 0i64;

        {
            let mut types_table = write_txn.open_table(TYPES_TABLE)?;
            let mut lists_table = write_txn.open_table(LISTS_TABLE)?;

            // Set the type to list
            types_table.insert(key, "list")?;

            // Get current list or create empty one
            let mut list: Vec<String> = match lists_table.get(key)? {
                Some(data) => {
                    let decrypted = self.decrypt_if_needed(data.value())?;
                    serde_json::from_slice(&decrypted)?
                }
                None => Vec::new(),
            };

            // Add elements to the front (left)
            for element in elements.into_iter() {
                list.insert(0, element);
            }

            _length = list.len() as i64;

            // Encrypt and store the updated list
            let serialized = serde_json::to_vec(&list)?;
            let encrypted = self.encrypt_if_needed(&serialized)?;
            lists_table.insert(key, encrypted.as_slice())?;
        }

        write_txn.commit()?;
        Ok(_length)
    }

    // ✅ ENCRYPTION APPLIED: Elements are encrypted before storage
    pub fn rpush(&self, key: &str, elements: Vec<String>) -> Result<i64, DBError> {
        let write_txn = self.db.begin_write()?;
        let mut _length = 0i64;

        {
            let mut types_table = write_txn.open_table(TYPES_TABLE)?;
            let mut lists_table = write_txn.open_table(LISTS_TABLE)?;

            // Set the type to list
            types_table.insert(key, "list")?;

            // Get current list or create empty one
            let mut list: Vec<String> = match lists_table.get(key)? {
                Some(data) => {
                    let decrypted = self.decrypt_if_needed(data.value())?;
                    serde_json::from_slice(&decrypted)?
                }
                None => Vec::new(),
            };

            // Add elements to the end (right)
            list.extend(elements);
            _length = list.len() as i64;

            // Encrypt and store the updated list
            let serialized = serde_json::to_vec(&list)?;
            let encrypted = self.encrypt_if_needed(&serialized)?;
            lists_table.insert(key, encrypted.as_slice())?;
        }

        write_txn.commit()?;
        Ok(_length)
    }

    // ✅ ENCRYPTION APPLIED: Elements are decrypted after retrieval and encrypted before storage
    pub fn lpop(&self, key: &str, count: u64) -> Result<Vec<String>, DBError> {
        let write_txn = self.db.begin_write()?;
        let mut result = Vec::new();

        // First check if key exists and is a list, and get the data
        let list_data = {
            let types_table = write_txn.open_table(TYPES_TABLE)?;
            let lists_table = write_txn.open_table(LISTS_TABLE)?;

            let result = match types_table.get(key)? {
                Some(type_val) if type_val.value() == "list" => {
                    if let Some(data) = lists_table.get(key)? {
                        let decrypted = self.decrypt_if_needed(data.value())?;
                        let list: Vec<String> = serde_json::from_slice(&decrypted)?;
                        Some(list)
                    } else {
                        None
                    }
                }
                _ => None,
            };
            result
        };

        if let Some(mut list) = list_data {
            let pop_count = std::cmp::min(count as usize, list.len());
            for _ in 0..pop_count {
                if !list.is_empty() {
                    result.push(list.remove(0));
                }
            }

            let mut lists_table = write_txn.open_table(LISTS_TABLE)?;
            if list.is_empty() {
                // Remove the key if list is empty
                lists_table.remove(key)?;
                let mut types_table = write_txn.open_table(TYPES_TABLE)?;
                types_table.remove(key)?;
            } else {
                // Encrypt and store the updated list
                let serialized = serde_json::to_vec(&list)?;
                let encrypted = self.encrypt_if_needed(&serialized)?;
                lists_table.insert(key, encrypted.as_slice())?;
            }
        }

        write_txn.commit()?;
        Ok(result)
    }

    // ✅ ENCRYPTION APPLIED: Elements are decrypted after retrieval and encrypted before storage
    pub fn rpop(&self, key: &str, count: u64) -> Result<Vec<String>, DBError> {
        let write_txn = self.db.begin_write()?;
        let mut result = Vec::new();

        // First check if key exists and is a list, and get the data
        let list_data = {
            let types_table = write_txn.open_table(TYPES_TABLE)?;
            let lists_table = write_txn.open_table(LISTS_TABLE)?;

            let result = match types_table.get(key)? {
                Some(type_val) if type_val.value() == "list" => {
                    if let Some(data) = lists_table.get(key)? {
                        let decrypted = self.decrypt_if_needed(data.value())?;
                        let list: Vec<String> = serde_json::from_slice(&decrypted)?;
                        Some(list)
                    } else {
                        None
                    }
                }
                _ => None,
            };
            result
        };

        if let Some(mut list) = list_data {
            let pop_count = std::cmp::min(count as usize, list.len());
            for _ in 0..pop_count {
                if !list.is_empty() {
                    result.push(list.pop().unwrap());
                }
            }

            let mut lists_table = write_txn.open_table(LISTS_TABLE)?;
            if list.is_empty() {
                // Remove the key if list is empty
                lists_table.remove(key)?;
                let mut types_table = write_txn.open_table(TYPES_TABLE)?;
                types_table.remove(key)?;
            } else {
                // Encrypt and store the updated list
                let serialized = serde_json::to_vec(&list)?;
                let encrypted = self.encrypt_if_needed(&serialized)?;
                lists_table.insert(key, encrypted.as_slice())?;
            }
        }

        write_txn.commit()?;
        Ok(result)
    }

    pub fn llen(&self, key: &str) -> Result<i64, DBError> {
        let read_txn = self.db.begin_read()?;
        let types_table = read_txn.open_table(TYPES_TABLE)?;

        match types_table.get(key)? {
            Some(type_val) if type_val.value() == "list" => {
                let lists_table = read_txn.open_table(LISTS_TABLE)?;
                match lists_table.get(key)? {
                    Some(data) => {
                        let decrypted = self.decrypt_if_needed(data.value())?;
                        let list: Vec<String> = serde_json::from_slice(&decrypted)?;
                        Ok(list.len() as i64)
                    }
                    None => Ok(0),
                }
            }
            _ => Ok(0),
        }
    }

    // ✅ ENCRYPTION APPLIED: Element is decrypted after retrieval
    pub fn lindex(&self, key: &str, index: i64) -> Result<Option<String>, DBError> {
        let read_txn = self.db.begin_read()?;
        let types_table = read_txn.open_table(TYPES_TABLE)?;

        match types_table.get(key)? {
            Some(type_val) if type_val.value() == "list" => {
                let lists_table = read_txn.open_table(LISTS_TABLE)?;
                match lists_table.get(key)? {
                    Some(data) => {
                        let decrypted = self.decrypt_if_needed(data.value())?;
                        let list: Vec<String> = serde_json::from_slice(&decrypted)?;

                        let actual_index = if index < 0 {
                            list.len() as i64 + index
                        } else {
                            index
                        };

                        if actual_index >= 0 && (actual_index as usize) < list.len() {
                            Ok(Some(list[actual_index as usize].clone()))
                        } else {
                            Ok(None)
                        }
                    }
                    None => Ok(None),
                }
            }
            _ => Ok(None),
        }
    }

    // ✅ ENCRYPTION APPLIED: Elements are decrypted after retrieval
    pub fn lrange(&self, key: &str, start: i64, stop: i64) -> Result<Vec<String>, DBError> {
        let read_txn = self.db.begin_read()?;
        let types_table = read_txn.open_table(TYPES_TABLE)?;

        match types_table.get(key)? {
            Some(type_val) if type_val.value() == "list" => {
                let lists_table = read_txn.open_table(LISTS_TABLE)?;
                match lists_table.get(key)? {
                    Some(data) => {
                        let decrypted = self.decrypt_if_needed(data.value())?;
                        let list: Vec<String> = serde_json::from_slice(&decrypted)?;

                        if list.is_empty() {
                            return Ok(Vec::new());
                        }

                        let len = list.len() as i64;
                        let start_idx = if start < 0 { std::cmp::max(0, len + start) } else { std::cmp::min(start, len) };
                        let stop_idx = if stop < 0 { std::cmp::max(-1, len + stop) } else { std::cmp::min(stop, len - 1) };

                        if start_idx > stop_idx || start_idx >= len {
                            return Ok(Vec::new());
                        }

                        let start_usize = start_idx as usize;
                        let stop_usize = (stop_idx + 1) as usize;

                        Ok(list[start_usize..std::cmp::min(stop_usize, list.len())].to_vec())
                    }
                    None => Ok(Vec::new()),
                }
            }
            _ => Ok(Vec::new()),
        }
    }

    // ✅ ENCRYPTION APPLIED: Elements are decrypted after retrieval and encrypted before storage
    pub fn ltrim(&self, key: &str, start: i64, stop: i64) -> Result<(), DBError> {
        let write_txn = self.db.begin_write()?;

        // First check if key exists and is a list, and get the data
        let list_data = {
            let types_table = write_txn.open_table(TYPES_TABLE)?;
            let lists_table = write_txn.open_table(LISTS_TABLE)?;

            let result = match types_table.get(key)? {
                Some(type_val) if type_val.value() == "list" => {
                    if let Some(data) = lists_table.get(key)? {
                        let decrypted = self.decrypt_if_needed(data.value())?;
                        let list: Vec<String> = serde_json::from_slice(&decrypted)?;
                        Some(list)
                    } else {
                        None
                    }
                }
                _ => None,
            };
            result
        };

        if let Some(list) = list_data {
            if list.is_empty() {
                write_txn.commit()?;
                return Ok(());
            }

            let len = list.len() as i64;
            let start_idx = if start < 0 { std::cmp::max(0, len + start) } else { std::cmp::min(start, len) };
            let stop_idx = if stop < 0 { std::cmp::max(-1, len + stop) } else { std::cmp::min(stop, len - 1) };

            let mut lists_table = write_txn.open_table(LISTS_TABLE)?;
            if start_idx > stop_idx || start_idx >= len {
                // Remove the entire list
                lists_table.remove(key)?;
                let mut types_table = write_txn.open_table(TYPES_TABLE)?;
                types_table.remove(key)?;
            } else {
                let start_usize = start_idx as usize;
                let stop_usize = (stop_idx + 1) as usize;
                let trimmed = list[start_usize..std::cmp::min(stop_usize, list.len())].to_vec();

                if trimmed.is_empty() {
                    lists_table.remove(key)?;
                    let mut types_table = write_txn.open_table(TYPES_TABLE)?;
                    types_table.remove(key)?;
                } else {
                    // Encrypt and store the trimmed list
                    let serialized = serde_json::to_vec(&trimmed)?;
                    let encrypted = self.encrypt_if_needed(&serialized)?;
                    lists_table.insert(key, encrypted.as_slice())?;
                }
            }
        }

        write_txn.commit()?;
        Ok(())
    }

    // ✅ ENCRYPTION APPLIED: Elements are decrypted after retrieval and encrypted before storage
    pub fn lrem(&self, key: &str, count: i64, element: &str) -> Result<i64, DBError> {
        let write_txn = self.db.begin_write()?;
        let mut removed = 0i64;

        // First check if key exists and is a list, and get the data
        let list_data = {
            let types_table = write_txn.open_table(TYPES_TABLE)?;
            let lists_table = write_txn.open_table(LISTS_TABLE)?;

            let result = match types_table.get(key)? {
                Some(type_val) if type_val.value() == "list" => {
                    if let Some(data) = lists_table.get(key)? {
                        let decrypted = self.decrypt_if_needed(data.value())?;
                        let list: Vec<String> = serde_json::from_slice(&decrypted)?;
                        Some(list)
                    } else {
                        None
                    }
                }
                _ => None,
            };
            result
        };

        if let Some(mut list) = list_data {
            if count == 0 {
                // Remove all occurrences
                let original_len = list.len();
                list.retain(|x| x != element);
                removed = (original_len - list.len()) as i64;
            } else if count > 0 {
                // Remove first count occurrences
                let mut to_remove = count as usize;
                list.retain(|x| {
                    if x == element && to_remove > 0 {
                        to_remove -= 1;
                        removed += 1;
                        false
                    } else {
                        true
                    }
                });
            } else {
                // Remove last |count| occurrences
                let mut to_remove = (-count) as usize;
                for i in (0..list.len()).rev() {
                    if list[i] == element && to_remove > 0 {
                        list.remove(i);
                        to_remove -= 1;
                        removed += 1;
                    }
                }
            }

            let mut lists_table = write_txn.open_table(LISTS_TABLE)?;
            if list.is_empty() {
                lists_table.remove(key)?;
                let mut types_table = write_txn.open_table(TYPES_TABLE)?;
                types_table.remove(key)?;
            } else {
                // Encrypt and store the updated list
                let serialized = serde_json::to_vec(&list)?;
                let encrypted = self.encrypt_if_needed(&serialized)?;
                lists_table.insert(key, encrypted.as_slice())?;
            }
        }

        write_txn.commit()?;
        Ok(removed)
    }
}
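These storage methods back the standard Redis list commands that the updated test suite exercises over RESP. As a quick client-side sanity check, a minimal sketch using the `redis` crate (already a dev-dependency of the test suite) might look like the following; the address and port are placeholders for illustration, not values taken from this change:

```rust
use redis::Client;

fn main() -> redis::RedisResult<()> {
    // Placeholder address: point this at a running HeroDB instance.
    let client = Client::open("redis://127.0.0.1:6379/")?;
    let mut conn = client.get_connection()?;

    // RPUSH appends, so the list becomes ["a", "b", "c"].
    let len: i64 = redis::cmd("RPUSH").arg("mylist").arg("a").arg("b").arg("c").query(&mut conn)?;
    assert_eq!(len, 3);

    // LRANGE 0 -1 returns the whole list.
    let all: Vec<String> = redis::cmd("LRANGE").arg("mylist").arg(0).arg(-1).query(&mut conn)?;
    assert_eq!(all, vec!["a", "b", "c"]);

    // LREM with count 0 removes every occurrence of the element.
    let removed: i64 = redis::cmd("LREM").arg("mylist").arg(0).arg("b").query(&mut conn)?;
    assert_eq!(removed, 1);

    // LPOP now returns the head, "a".
    let head: Option<String> = redis::cmd("LPOP").arg("mylist").query(&mut conn)?;
    assert_eq!(head.as_deref(), Some("a"));
    Ok(())
}
```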
@@ -288,7 +288,7 @@ main() {

    # Build the project
    print_status "Building HeroDB..."
    if ! cargo build --release; then
    if ! cargo build -p herodb --release; then
        print_error "Failed to build HeroDB"
        exit 1
    fi

@@ -298,7 +298,7 @@ main() {

    # Start the server
    print_status "Starting HeroDB server..."
    ./target/release/redis-rs --dir "$DB_DIR" --port $PORT &
    ./target/release/herodb --dir "$DB_DIR" --port $PORT &
    SERVER_PID=$!

    # Wait for server to start
@@ -1,4 +1,4 @@
use redis_rs::{server::Server, options::DBOption};
use herodb::{server::Server, options::DBOption};
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;

@@ -25,6 +25,8 @@ async fn debug_hset_simple() {
        dir: test_dir.to_string(),
        port,
        debug: false,
        encrypt: false,
        encryption_key: None,
    };

    let mut server = Server::new(option).await;

@@ -1,4 +1,4 @@
use redis_rs::{server::Server, options::DBOption};
use herodb::{server::Server, options::DBOption};
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;

@@ -16,6 +16,8 @@ async fn debug_hset_return_value() {
        dir: test_dir.to_string(),
        port: 16390,
        debug: false,
        encrypt: false,
        encryption_key: None,
    };

    let mut server = Server::new(option).await;

@@ -1,5 +1,5 @@
use redis_rs::protocol::Protocol;
use redis_rs::cmd::Cmd;
use herodb::protocol::Protocol;
use herodb::cmd::Cmd;

#[test]
fn test_protocol_parsing() {
@@ -1,4 +1,4 @@
use redis::{Client, Commands, Connection};
use redis::{Client, Commands, Connection, RedisResult};
use std::process::{Child, Command};
use std::time::Duration;
use tokio::time::sleep;

@@ -77,6 +77,7 @@ fn setup_server() -> (ServerProcessGuard, u16) {
            &test_dir,
            "--port",
            &port.to_string(),
            "--debug",
        ])
        .spawn()
        .expect("Failed to start server process");

@@ -88,214 +89,147 @@ fn setup_server() -> (ServerProcessGuard, u16) {
    };

    // Give the server a moment to start
    std::thread::sleep(Duration::from_millis(100));
    std::thread::sleep(Duration::from_millis(500));

    (guard, port)
}

async fn cleanup_keys(conn: &mut Connection) {
    let keys: Vec<String> = redis::cmd("KEYS").arg("*").query(conn).unwrap();
    if !keys.is_empty() {
        for key in keys {
            let _: () = redis::cmd("DEL").arg(key).query(conn).unwrap();
        }
    }
}

#[tokio::test]
async fn all_tests() {
    let (_server_guard, port) = setup_server();
    let mut conn = get_redis_connection(port);

    // Run all tests using the same connection
    cleanup_keys(&mut conn).await;
    test_basic_ping(&mut conn).await;
    cleanup_keys(&mut conn).await;
    test_string_operations(&mut conn).await;
    cleanup_keys(&mut conn).await;
    test_incr_operations(&mut conn).await;
    // cleanup_keys(&mut conn).await;
    // test_hash_operations(&mut conn).await;
    cleanup_keys(&mut conn).await;
    test_hash_operations(&mut conn).await;
    test_expiration(&mut conn).await;
    cleanup_keys(&mut conn).await;
    test_scan_operations(&mut conn).await;
    cleanup_keys(&mut conn).await;
    test_scan_with_count(&mut conn).await;
    cleanup_keys(&mut conn).await;
    test_hscan_operations(&mut conn).await;
    cleanup_keys(&mut conn).await;
    test_transaction_operations(&mut conn).await;
    cleanup_keys(&mut conn).await;
    test_discard_transaction(&mut conn).await;
    cleanup_keys(&mut conn).await;
    test_type_command(&mut conn).await;
    cleanup_keys(&mut conn).await;
    test_config_commands(&mut conn).await;
    cleanup_keys(&mut conn).await;
    test_info_command(&mut conn).await;
    cleanup_keys(&mut conn).await;
    test_error_handling(&mut conn).await;

    // Clean up keys after all tests
    cleanup_keys(&mut conn).await;
}

async fn cleanup_keys(conn: &mut Connection) {
    let keys: Vec<String> = redis::cmd("KEYS").arg("*").query(conn).unwrap();
    if !keys.is_empty() {
        let _: () = redis::cmd("DEL").arg(keys).query(conn).unwrap();
    }
}

async fn test_basic_ping(conn: &mut Connection) {
    cleanup_keys(conn).await;
    let result: String = redis::cmd("PING").query(conn).unwrap();
    assert_eq!(result, "PONG");

    cleanup_keys(conn).await;
}

async fn test_string_operations(conn: &mut Connection) {
    // Test SET
    cleanup_keys(conn).await;
    let _: () = conn.set("key", "value").unwrap();

    // Test GET
    let result: String = conn.get("key").unwrap();
    assert_eq!(result, "value");

    // Test GET non-existent key
    let result: Option<String> = conn.get("noexist").unwrap();
    assert_eq!(result, None);

    // Test DEL
    let deleted: i32 = conn.del("key").unwrap();
    assert_eq!(deleted, 1);

    // Test GET after DEL
    let result: Option<String> = conn.get("key").unwrap();
    assert_eq!(result, None);
    cleanup_keys(conn).await;
}

async fn test_incr_operations(conn: &mut Connection) {
    // Test INCR on non-existent key
    cleanup_keys(conn).await;
    let result: i32 = redis::cmd("INCR").arg("counter").query(conn).unwrap();
    assert_eq!(result, 1);

    // Test INCR on existing key
    let result: i32 = redis::cmd("INCR").arg("counter").query(conn).unwrap();
    assert_eq!(result, 2);

    // Test INCR on string value (should fail)
    let _: () = conn.set("string", "hello").unwrap();
    let result: Result<i32, _> = redis::cmd("INCR").arg("string").query(conn);
    let result: RedisResult<i32> = redis::cmd("INCR").arg("string").query(conn);
    assert!(result.is_err());
    cleanup_keys(conn).await;
}

async fn test_hash_operations(conn: &mut Connection) {
    // Test HSET
    cleanup_keys(conn).await;
    let result: i32 = conn.hset("hash", "field1", "value1").unwrap();
    assert_eq!(result, 1); // 1 new field

    // Test HGET
    assert_eq!(result, 1);
    let result: String = conn.hget("hash", "field1").unwrap();
    assert_eq!(result, "value1");

    // Test HSET multiple fields
    let _: () = conn.hset_multiple("hash", &[("field2", "value2"), ("field3", "value3")]).unwrap();

    // Test HGETALL
    let _: () = conn.hset("hash", "field2", "value2").unwrap();
    let _: () = conn.hset("hash", "field3", "value3").unwrap();
    let result: std::collections::HashMap<String, String> = conn.hgetall("hash").unwrap();
    assert_eq!(result.len(), 3);
    assert_eq!(result.get("field1").unwrap(), "value1");
    assert_eq!(result.get("field2").unwrap(), "value2");
    assert_eq!(result.get("field3").unwrap(), "value3");

    // Test HLEN
    let result: i32 = conn.hlen("hash").unwrap();
    assert_eq!(result, 3);

    // Test HEXISTS
    let result: bool = conn.hexists("hash", "field1").unwrap();
    assert_eq!(result, true);

    let result: bool = conn.hexists("hash", "noexist").unwrap();
    assert_eq!(result, false);

    // Test HDEL
    let result: i32 = conn.hdel("hash", "field1").unwrap();
    assert_eq!(result, 1);

    // Test HKEYS
    let mut result: Vec<String> = conn.hkeys("hash").unwrap();
    result.sort();
    assert_eq!(result, vec!["field2", "field3"]);

    // Test HVALS
    let mut result: Vec<String> = conn.hvals("hash").unwrap();
    result.sort();
    assert_eq!(result, vec!["value2", "value3"]);
    cleanup_keys(conn).await;
}

async fn test_expiration(conn: &mut Connection) {
    // Test SETEX (expire in 1 second)
    cleanup_keys(conn).await;
    let _: () = conn.set_ex("expkey", "value", 1).unwrap();

    // Test TTL
    let result: i32 = conn.ttl("expkey").unwrap();
    assert!(result == 1 || result == 0); // Should be 1 or 0 seconds

    // Test EXISTS
    assert!(result == 1 || result == 0);
    let result: bool = conn.exists("expkey").unwrap();
    assert_eq!(result, true);

    // Wait for expiration
    sleep(Duration::from_millis(1100)).await;

    // Test GET after expiration
    let result: Option<String> = conn.get("expkey").unwrap();
    assert_eq!(result, None);

    // Test TTL after expiration
    let result: i32 = conn.ttl("expkey").unwrap();
    assert_eq!(result, -2); // Key doesn't exist

    // Test EXISTS after expiration
    assert_eq!(result, -2);
    let result: bool = conn.exists("expkey").unwrap();
    assert_eq!(result, false);
    cleanup_keys(conn).await;
}

async fn test_scan_operations(conn: &mut Connection) {
    // Set up test data
    cleanup_keys(conn).await;
    for i in 0..5 {
        let _: () = conn.set(format!("key{}", i), format!("value{}", i)).unwrap();
    }

    // Test SCAN
    let result: (u64, Vec<String>) = redis::cmd("SCAN")
        .arg(0)
        .arg("MATCH")
        .arg("*")
        .arg("key*")
        .arg("COUNT")
        .arg(10)
        .query(conn)
        .unwrap();

    let (cursor, keys) = result;
    assert_eq!(cursor, 0); // Should complete in one scan
    assert_eq!(cursor, 0);
    assert_eq!(keys.len(), 5);

    // Test KEYS
    let mut result: Vec<String> = redis::cmd("KEYS").arg("*").query(conn).unwrap();
    result.sort();
    assert_eq!(result, vec!["key0", "key1", "key2", "key3", "key4"]);
    cleanup_keys(conn).await;
}

async fn test_scan_with_count(conn: &mut Connection) {
    // Clean up previous keys
    let keys: Vec<String> = redis::cmd("KEYS").arg("scan_key*").query(conn).unwrap();
    if !keys.is_empty() {
        let _: () = redis::cmd("DEL").arg(keys).query(conn).unwrap();
    }

    // Set up test data
    cleanup_keys(conn).await;
    for i in 0..15 {
        let _: () = conn.set(format!("scan_key{}", i), i).unwrap();
    }

    let mut cursor = 0;
    let mut all_keys = std::collections::HashSet::new();

    // First SCAN
    loop {
        let (next_cursor, keys): (u64, Vec<String>) = redis::cmd("SCAN")
            .arg(cursor)
            .arg("MATCH")
@@ -304,57 +238,23 @@ async fn test_scan_with_count(conn: &mut Connection) {
            .arg(5)
            .query(conn)
            .unwrap();

        assert_ne!(next_cursor, 0);
        assert_eq!(keys.len(), 5);
        for key in keys {
            all_keys.insert(key);
        }
        cursor = next_cursor;

        // Second SCAN
        let (next_cursor, keys): (u64, Vec<String>) = redis::cmd("SCAN")
            .arg(cursor)
            .arg("MATCH")
            .arg("scan_key*")
            .arg("COUNT")
            .arg(5)
            .query(conn)
            .unwrap();

        assert_ne!(next_cursor, 0);
        assert_eq!(keys.len(), 5);
        for key in keys {
            all_keys.insert(key);
        if cursor == 0 {
            break;
        }
        cursor = next_cursor;

        // Final SCAN
        let (next_cursor, keys): (u64, Vec<String>) = redis::cmd("SCAN")
            .arg(cursor)
            .arg("MATCH")
            .arg("scan_key*")
            .arg("COUNT")
            .arg(5)
            .query(conn)
            .unwrap();

        assert_eq!(next_cursor, 0);
        assert_eq!(keys.len(), 5);
        for key in keys {
            all_keys.insert(key);
        }

    assert_eq!(all_keys.len(), 15);
    cleanup_keys(conn).await;
}

async fn test_hscan_operations(conn: &mut Connection) {
    // Set up hash data
    cleanup_keys(conn).await;
    for i in 0..3 {
        let _: () = conn.hset("testhash", format!("field{}", i), format!("value{}", i)).unwrap();
    }

    // Test HSCAN
    let result: (u64, Vec<String>) = redis::cmd("HSCAN")
        .arg("testhash")
        .arg(0)
@@ -364,98 +264,54 @@ async fn test_hscan_operations(conn: &mut Connection) {
        .arg(10)
        .query(conn)
        .unwrap();

    let (cursor, fields) = result;
    assert_eq!(cursor, 0); // Should complete in one scan
    assert_eq!(fields.len(), 6); // 3 field-value pairs = 6 elements
    assert_eq!(cursor, 0);
    assert_eq!(fields.len(), 6);
    cleanup_keys(conn).await;
}

async fn test_transaction_operations(conn: &mut Connection) {
    // Test MULTI/EXEC
    cleanup_keys(conn).await;
    let _: () = redis::cmd("MULTI").query(conn).unwrap();
    let _: () = redis::cmd("SET").arg("key1").arg("value1").query(conn).unwrap();
    let _: () = redis::cmd("SET").arg("key2").arg("value2").query(conn).unwrap();
    let _: Vec<String> = redis::cmd("EXEC").query(conn).unwrap();

    // Verify commands were executed
    let result: String = conn.get("key1").unwrap();
    assert_eq!(result, "value1");

    let result: String = conn.get("key2").unwrap();
    assert_eq!(result, "value2");
    cleanup_keys(conn).await;
}

async fn test_discard_transaction(conn: &mut Connection) {
    // Test MULTI/DISCARD
    cleanup_keys(conn).await;
    let _: () = redis::cmd("MULTI").query(conn).unwrap();
    let _: () = redis::cmd("SET").arg("discard").arg("value").query(conn).unwrap();
    let _: () = redis::cmd("DISCARD").query(conn).unwrap();

    // Verify command was not executed
    let result: Option<String> = conn.get("discard").unwrap();
    assert_eq!(result, None);
    cleanup_keys(conn).await;
}

async fn test_type_command(conn: &mut Connection) {
    // Test string type
    cleanup_keys(conn).await;
    let _: () = conn.set("string", "value").unwrap();
    let result: String = redis::cmd("TYPE").arg("string").query(conn).unwrap();
    assert_eq!(result, "string");

    // Test hash type
    let _: () = conn.hset("hash", "field", "value").unwrap();
    let result: String = redis::cmd("TYPE").arg("hash").query(conn).unwrap();
    assert_eq!(result, "hash");

    // Test non-existent key
    let result: String = redis::cmd("TYPE").arg("noexist").query(conn).unwrap();
    assert_eq!(result, "none");
    cleanup_keys(conn).await;
}

async fn test_config_commands(conn: &mut Connection) {
    // Test CONFIG GET databases
    let result: Vec<String> = redis::cmd("CONFIG")
        .arg("GET")
        .arg("databases")
        .query(conn)
        .unwrap();
    assert_eq!(result, vec!["databases", "16"]);

    // Test CONFIG GET dir
    let result: Vec<String> = redis::cmd("CONFIG")
        .arg("GET")
        .arg("dir")
        .query(conn)
        .unwrap();
    assert_eq!(result[0], "dir");
    assert!(result[1].contains("/tmp/herodb_test_"));
}

async fn test_info_command(conn: &mut Connection) {
    // Test INFO
    cleanup_keys(conn).await;
    let result: String = redis::cmd("INFO").query(conn).unwrap();
    assert!(result.contains("redis_version"));

    // Test INFO replication
    let result: String = redis::cmd("INFO").arg("replication").query(conn).unwrap();
    assert!(result.contains("role:master"));
}

async fn test_error_handling(conn: &mut Connection) {
    // Test WRONGTYPE error - try to use hash command on string
    let _: () = conn.set("string", "value").unwrap();
    let result: Result<String, _> = conn.hget("string", "field");
    assert!(result.is_err());

    // Test unknown command
    let result: Result<String, _> = redis::cmd("UNKNOWN").query(conn);
    assert!(result.is_err());

    // Test EXEC without MULTI
    let result: Result<Vec<String>, _> = redis::cmd("EXEC").query(conn);
    assert!(result.is_err());

    // Test DISCARD without MULTI
    let result: Result<(), _> = redis::cmd("DISCARD").query(conn);
    assert!(result.is_err());
    cleanup_keys(conn).await;
}
@@ -1,4 +1,4 @@
use redis_rs::{server::Server, options::DBOption};
use herodb::{server::Server, options::DBOption};
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;

@@ -20,6 +20,8 @@ async fn start_test_server(test_name: &str) -> (Server, u16) {
        dir: test_dir,
        port,
        debug: true,
        encrypt: false,
        encryption_key: None,
    };

    let server = Server::new(option).await;

@@ -579,22 +581,19 @@ async fn test_list_operations() {

    // Test LRANGE
    let response = send_command(&mut stream, "*4\r\n$6\r\nLRANGE\r\n$4\r\nlist\r\n$1\r\n0\r\n$2\r\n-1\r\n").await;
    assert!(response.contains("b"));
    assert!(response.contains("a"));
    assert!(response.contains("c"));
    assert!(response.contains("d"));
    assert_eq!(response, "*4\r\n$1\r\nb\r\n$1\r\na\r\n$1\r\nc\r\n$1\r\nd\r\n");

    // Test LINDEX
    let response = send_command(&mut stream, "*3\r\n$6\r\nLINDEX\r\n$4\r\nlist\r\n$1\r\n0\r\n").await;
    assert!(response.contains("b"));
    assert_eq!(response, "$1\r\nb\r\n");

    // Test LPOP
    let response = send_command(&mut stream, "*2\r\n$4\r\nLPOP\r\n$4\r\nlist\r\n").await;
    assert!(response.contains("b"));
    assert_eq!(response, "$1\r\nb\r\n");

    // Test RPOP
    let response = send_command(&mut stream, "*2\r\n$4\r\nRPOP\r\n$4\r\nlist\r\n").await;
    assert!(response.contains("d"));
    assert_eq!(response, "$1\r\nd\r\n");

    // Test LREM
    send_command(&mut stream, "*3\r\n$5\r\nLPUSH\r\n$4\r\nlist\r\n$1\r\na\r\n").await; // list is now a, c, a
@@ -1,4 +1,4 @@
use redis_rs::{server::Server, options::DBOption};
use herodb::{server::Server, options::DBOption};
use std::time::Duration;
use tokio::time::sleep;
use tokio::io::{AsyncReadExt, AsyncWriteExt};

@@ -22,6 +22,8 @@ async fn start_test_server(test_name: &str) -> (Server, u16) {
        dir: test_dir,
        port,
        debug: true,
        encrypt: false,
        encryption_key: None,
    };

    let server = Server::new(option).await;

@@ -91,9 +93,16 @@ async fn test_basic_redis_functionality() {
    assert!(response.contains("string"));

    // Test QUIT to close connection gracefully
    let response = send_redis_command(port, "*1\r\n$4\r\nQUIT\r\n").await;
    let mut stream = TcpStream::connect(format!("127.0.0.1:{}", port)).await.unwrap();
    stream.write_all("*1\r\n$4\r\nQUIT\r\n".as_bytes()).await.unwrap();
    let mut buffer = [0; 1024];
    let n = stream.read(&mut buffer).await.unwrap();
    let response = String::from_utf8_lossy(&buffer[..n]);
    assert!(response.contains("OK"));

    // Ensure the stream is closed
    stream.shutdown().await.unwrap();

    // Stop the server
    server_handle.abort();

@@ -140,10 +149,15 @@ async fn test_hash_operations() {
    assert!(response.contains("2"));

    // Test HSCAN
    let response = send_redis_command(port, "*6\r\n$5\r\nHSCAN\r\n$4\r\nhash\r\n$1\r\n0\r\n$5\r\nMATCH\r\n$1\r\n*\r\n$5\r\nCOUNT\r\n$2\r\n10\r\n").await;
    assert!(response.contains("*2\r\n$1\r\n0\r\n*4\r\n$6\r\nfield1\r\n$6\r\nvalue1\r\n$6\r\nfield2\r\n$6\r\nvalue2\r\n"));
    let response = send_redis_command(port, "*7\r\n$5\r\nHSCAN\r\n$4\r\nhash\r\n$1\r\n0\r\n$5\r\nMATCH\r\n$1\r\n*\r\n$5\r\nCOUNT\r\n$2\r\n10\r\n").await;
    assert!(response.contains("field1"));
    assert!(response.contains("value1"));
    assert!(response.contains("field2"));
    assert!(response.contains("value2"));

    // Stop the server
    // For hash operations, we don't have a persistent stream, so we'll just abort the server.
    // The server should handle closing its connections.
    server_handle.abort();

    println!("✅ All hash operations tests passed!");

@@ -197,9 +211,16 @@ async fn test_transaction_operations() {
    assert!(response.contains("OK")); // Should contain array of OK responses

    // Verify commands were executed
    let response = send_redis_command(port, "*2\r\n$3\r\nGET\r\n$4\r\nkey1\r\n").await;
    stream.write_all("*2\r\n$3\r\nGET\r\n$4\r\nkey1\r\n".as_bytes()).await.unwrap();
    let n = stream.read(&mut buffer).await.unwrap();
    let response = String::from_utf8_lossy(&buffer[..n]);
    assert!(response.contains("value1"));

    stream.write_all("*2\r\n$3\r\nGET\r\n$4\r\nkey2\r\n".as_bytes()).await.unwrap();
    let n = stream.read(&mut buffer).await.unwrap();
    let response = String::from_utf8_lossy(&buffer[..n]);
    assert!(response.contains("value2"));

    // Stop the server
    server_handle.abort();
@@ -1,4 +1,4 @@
use redis_rs::{server::Server, options::DBOption};
use herodb::{server::Server, options::DBOption};
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;

@@ -20,6 +20,8 @@ async fn start_test_server(test_name: &str) -> (Server, u16) {
        dir: test_dir,
        port,
        debug: false,
        encrypt: false,
        encryption_key: None,
    };

    let server = Server::new(option).await;
1058 src/storage.rs
File diff suppressed because it is too large
9 supervisor/Cargo.toml Normal file
@@ -0,0 +1,9 @@
[package]
name = "supervisor"
version = "0.1.0"
edition = "2021"

[dependencies]
# The supervisor will eventually depend on the herodb crate.
# We can add this dependency now.
# herodb = { path = "../herodb" }
4 supervisor/src/main.rs Normal file
@@ -0,0 +1,4 @@
fn main() {
    println!("Hello from the supervisor crate!");
    // Supervisor logic will be implemented here.
}
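The supervisor binary is only a stub at this point. Purely as an illustration of where it is headed once the commented-out `herodb = { path = "../herodb" }` dependency is enabled, a hypothetical sketch could look like the following; the `DBOption` fields mirror the ones set in the test suite, while the directory and port are made-up values:

```rust
// Hypothetical sketch; assumes the herodb path dependency in supervisor/Cargo.toml is uncommented.
use herodb::options::DBOption;

fn main() {
    // Configuration for a supervised HeroDB instance (values are illustrative only).
    let option = DBOption {
        dir: "/tmp/herodb_supervised".to_string(),
        port: 6379,
        debug: false,
        encrypt: false,
        encryption_key: None,
    };

    println!(
        "supervisor would launch HeroDB on port {} with data dir {}",
        option.port, option.dir
    );
    // Actual process supervision and server startup are left to the future implementation.
}
```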