diff --git a/Cargo.lock b/Cargo.lock index e0ec8c8..740a0c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -70,6 +70,21 @@ dependencies = [ "sha2", ] +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + [[package]] name = "anstream" version = "0.6.20" @@ -218,6 +233,15 @@ version = "2.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34efbcccd345379ca2868b2b2c9d3782e9cc58ba87bc7d79d5b53d9c9ae6f25d" +[[package]] +name = "bitpacking" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c1d3e2bfd8d06048a179f7b17afc3188effa10385e7b00dc65af6aae732ea92" +dependencies = [ + "crunchy", +] + [[package]] name = "block-buffer" version = "0.10.4" @@ -227,6 +251,37 @@ dependencies = [ "generic-array", ] +[[package]] +name = "bon" +version = "3.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "537c317ddf588aab15c695bf92cf55dec159b93221c074180ca3e0e5a94da415" +dependencies = [ + "bon-macros", + "rustversion", +] + +[[package]] +name = "bon-macros" +version = "3.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca5abbf2d4a4c6896197c9de13d6d7cb7eff438c63dacde1dde980569cb00248" +dependencies = [ + "darling", + "ident_case", + "prettyplease", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.106", +] + +[[package]] +name = "bumpalo" +version = "3.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" + [[package]] name = "byteorder" version = "1.5.0" @@ -239,6 +294,23 @@ version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" +[[package]] +name = "cc" +version = "1.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42bc4aea80032b7bf409b0bc7ccad88853858911b7713a8062fdc0623867bedc" +dependencies = [ + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "census" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f4c707c6a209cbe82d10abd08e1ea8995e9ea937d2550646e02798948992be0" + [[package]] name = "cfg-if" version = "1.0.3" @@ -373,6 +445,25 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "crossbeam-channel" +version = "0.5.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + [[package]] name = "crossbeam-epoch" version = "0.9.18" @@ -388,6 +479,12 @@ version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" +[[package]] +name = "crunchy" +version = "0.2.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" + [[package]] name = "crypto-common" version = "0.1.6" @@ -426,6 +523,41 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "darling" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.11.1", + "syn 2.0.106", +] + +[[package]] +name = "darling_macro" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" +dependencies = [ + "darling_core", + "quote", + "syn 2.0.106", +] + [[package]] name = "dashmap" version = "5.5.3" @@ -433,7 +565,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if", - "hashbrown", + "hashbrown 0.14.5", "lock_api", "once_cell", "parking_lot_core 0.9.11", @@ -449,6 +581,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "deranged" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +dependencies = [ + "powerfmt", + "serde", +] + [[package]] name = "digest" version = "0.10.7" @@ -471,6 +613,12 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "downcast-rs" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea8a8b81cacc08888170eef4d13b775126db426d0b348bee9d18c2c1eaf123cf" + [[package]] name = "ed25519" version = "2.2.3" @@ -495,6 +643,40 @@ dependencies = [ "zeroize", ] +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "fastdivide" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afc2bd4d5a73106dd53d10d73d3401c2f32730ba2c0b93ddb888a8983680471" + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + [[package]] name = "fiat-crypto" version = "0.2.9" @@ -551,9 +733,21 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a530c4694a6a8d528794ee9bbd8ba0122e779629ac908d15ad5a7ae7763a33d" dependencies = [ - "thiserror", + "thiserror 1.0.69", ] +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + [[package]] name = "form_urlencoded" version = "1.2.2" @@ -573,6 +767,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "fs4" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8640e34b88f7652208ce9e88b1a37a2ae95227d84abec377ccd3c5cfeb141ed4" +dependencies = [ + "rustix", + "windows-sys 0.59.0", +] + [[package]] name = "futures" version = "0.3.31" @@ -689,7 +893,19 @@ checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "libc", - "wasi", + "wasi 0.11.1+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", ] [[package]] @@ -704,6 +920,17 @@ version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", +] + [[package]] name = "heck" version = "0.5.0" @@ -732,7 +959,8 @@ dependencies = [ "serde_json", "sha2", "sled", - "thiserror", + "tantivy", + "thiserror 1.0.69", "tokio", ] @@ -754,6 +982,21 @@ dependencies = [ "digest", ] +[[package]] +name = "htmlescape" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9025058dae765dee5070ec375f591e2ba14638c63feff74f13805a72e523163" + +[[package]] +name = "hyperloglogplus" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "621debdf94dcac33e50475fdd76d34d5ea9c0362a834b9db08c3024696c1fbe3" +dependencies = [ + "serde", +] + [[package]] name = "i18n-config" version = "0.4.8" @@ -764,7 +1007,7 @@ dependencies = [ "log", "serde", "serde_derive", - "thiserror", + "thiserror 1.0.69", "unic-langid", ] @@ -784,7 +1027,7 @@ dependencies = [ "log", "parking_lot 0.12.4", "rust-embed", - "thiserror", + "thiserror 1.0.69", "unic-langid", "walkdir", ] @@ -909,6 +1152,12 @@ dependencies = [ "zerovec", ] +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + [[package]] name = "idna" version = "1.1.0" @@ -990,24 +1239,71 @@ version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +[[package]] +name = "jobserver" +version = "0.1.33" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +dependencies = [ + "getrandom 0.3.3", + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + [[package]] name = "lazy_static" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +[[package]] +name = "levenshtein_automata" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c2cdeb66e45e9f36bfad5bbdb4d2384e70936afbee843c6f6543f0c551ebb25" + [[package]] name = "libc" version = "0.2.175" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" +[[package]] +name = "libm" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" + +[[package]] +name = "linux-raw-sys" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" + [[package]] name = "litemap" version = "0.8.0" @@ -1030,12 +1326,45 @@ version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +[[package]] +name = "lru" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +dependencies = [ + "hashbrown 0.15.5", +] + +[[package]] +name = "lz4_flex" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08ab2867e3eeeca90e844d1940eab391c9dc5228783db2ed999acbc0a9ed375a" + +[[package]] +name = "measure_time" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51c55d61e72fc3ab704396c5fa16f4c184db37978ae4e94ca8959693a235fc0e" +dependencies = [ + "log", +] + [[package]] name = "memchr" version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" +[[package]] +name = "memmap2" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843a98750cd611cc2965a8213b53b43e715f13c37a9e096c6408e69990961db7" +dependencies = [ + "libc", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -1058,10 +1387,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", - "wasi", + "wasi 0.11.1+wasi-snapshot-preview1", "windows-sys 0.59.0", ] +[[package]] +name = "murmurhash32" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2195bf6aa996a481483b29d62a7663eed3fe39600c460e323f8ff41e90bdd89b" + [[package]] name = "nom" version = "7.1.3" @@ -1072,6 +1407,22 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", + "libm", +] + [[package]] name = "object" version = "0.36.7" @@ -1093,12 +1444,27 @@ version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" +[[package]] +name = "oneshot" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4ce411919553d3f9fa53a0880544cda985a112117a0444d5ff1e870a893d6ea" + [[package]] name = "opaque-debug" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" +[[package]] +name = "ownedbytes" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fbd56f7631767e61784dc43f8580f403f4475bd4aaa4da003e6295e1bab4a7e" +dependencies = [ + "stable_deref_trait", +] + [[package]] name = "parking_lot" version = "0.11.2" @@ -1205,6 +1571,12 @@ dependencies = [ "spki", ] +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + [[package]] name = "poly1305" version = "0.8.0" @@ -1225,6 +1597,12 @@ dependencies = [ "zerovec", ] +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" version = "0.2.21" @@ -1234,6 +1612,16 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn 2.0.106", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -1276,6 +1664,12 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + [[package]] name = "rand" version = "0.8.5" @@ -1303,7 +1697,37 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.16", +] + +[[package]] +name = "rand_distr" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" +dependencies = [ + "num-traits", + "rand", +] + +[[package]] +name = "rayon" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", ] [[package]] @@ -1354,6 +1778,35 @@ dependencies = [ 
"bitflags 2.9.3", ] +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + [[package]] name = "rust-embed" version = "8.7.2" @@ -1388,6 +1841,16 @@ dependencies = [ "walkdir", ] +[[package]] +name = "rust-stemmers" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e46a2036019fdb888131db7a4c847a1063a7493f971ed94ea82c67eada63ca54" +dependencies = [ + "serde", + "serde_derive", +] + [[package]] name = "rustc-demangle" version = "0.1.26" @@ -1415,6 +1878,25 @@ dependencies = [ "semver", ] +[[package]] +name = "rustix" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" +dependencies = [ + "bitflags 2.9.3", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.60.2", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + [[package]] name = "ryu" version = "1.0.20" @@ -1535,6 +2017,12 @@ dependencies = [ "digest", ] +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "signal-hook-registry" version = "1.4.6" @@ -1553,6 +2041,15 @@ dependencies = [ "rand_core", ] +[[package]] +name = "sketches-ddsketch" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1e9a774a6c28142ac54bb25d25562e6bcf957493a184f15ad4eebccb23e410a" +dependencies = [ + "serde", +] + [[package]] name = "slab" version = "0.4.11" @@ -1667,13 +2164,181 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "tantivy" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "502915c7381c5cb2d2781503962610cb880ad8f1a0ca95df1bae645d5ebf2545" +dependencies = [ + "aho-corasick", + "arc-swap", + "base64 0.22.1", + "bitpacking", + "bon", + "byteorder", + "census", + "crc32fast", + "crossbeam-channel", + "downcast-rs", + "fastdivide", + "fnv", + "fs4", + "htmlescape", + "hyperloglogplus", + "itertools", + "levenshtein_automata", + "log", + "lru", + "lz4_flex", + "measure_time", + "memmap2", + "once_cell", + "oneshot", + "rayon", + "regex", + "rust-stemmers", + "rustc-hash 2.1.1", + "serde", + "serde_json", + "sketches-ddsketch", + "smallvec", + "tantivy-bitpacker", + "tantivy-columnar", + "tantivy-common", + "tantivy-fst", + "tantivy-query-grammar", + "tantivy-stacker", + "tantivy-tokenizer-api", + "tempfile", + "thiserror 2.0.16", + "time", + "uuid", + "winapi", +] + +[[package]] +name = "tantivy-bitpacker" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "c3b04eed5108d8283607da6710fe17a7663523440eaf7ea5a1a440d19a1448b6" +dependencies = [ + "bitpacking", +] + +[[package]] +name = "tantivy-columnar" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b628488ae936c83e92b5c4056833054ca56f76c0e616aee8339e24ac89119cd" +dependencies = [ + "downcast-rs", + "fastdivide", + "itertools", + "serde", + "tantivy-bitpacker", + "tantivy-common", + "tantivy-sstable", + "tantivy-stacker", +] + +[[package]] +name = "tantivy-common" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f880aa7cab0c063a47b62596d10991cdd0b6e0e0575d9c5eeb298b307a25de55" +dependencies = [ + "async-trait", + "byteorder", + "ownedbytes", + "serde", + "time", +] + +[[package]] +name = "tantivy-fst" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d60769b80ad7953d8a7b2c70cdfe722bbcdcac6bccc8ac934c40c034d866fc18" +dependencies = [ + "byteorder", + "regex-syntax", + "utf8-ranges", +] + +[[package]] +name = "tantivy-query-grammar" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "768fccdc84d60d86235d42d7e4c33acf43c418258ff5952abf07bd7837fcd26b" +dependencies = [ + "nom", + "serde", + "serde_json", +] + +[[package]] +name = "tantivy-sstable" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8292095d1a8a2c2b36380ec455f910ab52dde516af36321af332c93f20ab7d5" +dependencies = [ + "futures-util", + "itertools", + "tantivy-bitpacker", + "tantivy-common", + "tantivy-fst", + "zstd", +] + +[[package]] +name = "tantivy-stacker" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23d38a379411169f0b3002c9cba61cdfe315f757e9d4f239c00c282497a0749d" +dependencies = [ + "murmurhash32", + "rand_distr", + "tantivy-common", +] + +[[package]] +name = "tantivy-tokenizer-api" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23024f6aeb25ceb1a0e27740c84bdb0fae52626737b7e9a9de6ad5aa25c7b038" +dependencies = [ + "serde", +] + +[[package]] +name = "tempfile" +version = "3.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15b61f8f20e3a6f7e0649d825294eaf317edce30f82cf6026e7e4cb9222a7d1e" +dependencies = [ + "fastrand", + "getrandom 0.3.3", + "once_cell", + "rustix", + "windows-sys 0.60.2", +] + [[package]] name = "thiserror" version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0" +dependencies = [ + "thiserror-impl 2.0.16", ] [[package]] @@ -1687,6 +2352,48 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "thiserror-impl" +version = "2.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "time" +version = "0.3.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +dependencies = [ + "deranged", 
+ "itoa", + "num-conv", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" + +[[package]] +name = "time-macros" +version = "0.2.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +dependencies = [ + "num-conv", + "time-core", +] + [[package]] name = "tinystr" version = "0.8.1" @@ -1811,6 +2518,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "utf8-ranges" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcfc827f90e53a02eaef5e535ee14266c1d569214c6aa70133a624d8a3164ba" + [[package]] name = "utf8_iter" version = "1.0.4" @@ -1823,6 +2536,18 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +[[package]] +name = "uuid" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f33196643e165781c20a5ead5582283a7dacbb87855d867fbc2df3f81eddc1be" +dependencies = [ + "getrandom 0.3.3", + "js-sys", + "serde", + "wasm-bindgen", +] + [[package]] name = "version_check" version = "0.9.5" @@ -1845,6 +2570,73 @@ version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn 2.0.106", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] + [[package]] name = "winapi" version = "0.3.9" @@ -2029,6 +2821,15 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" +[[package]] +name = "wit-bindgen-rt" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + "bitflags 2.9.3", +] + [[package]] name = "writeable" version = "0.6.1" @@ -2164,3 +2965,31 @@ dependencies = [ "quote", "syn 2.0.106", ] + +[[package]] +name = "zstd" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" +dependencies = [ + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.15+zstd.1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" +dependencies = [ + "cc", + "pkg-config", +] diff --git a/Cargo.toml b/Cargo.toml index 7e952b6..393da00 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,6 +24,7 @@ age = "0.10" secrecy = "0.8" ed25519-dalek = "2" base64 = "0.22" +tantivy = "0.25.0" [dev-dependencies] redis = { version = "0.24", features = ["aio", "tokio-comp"] } diff --git a/examples/age_persist_demo.rs b/examples/age_persist_demo.rs index 9caf3bd..c363158 100644 --- a/examples/age_persist_demo.rs +++ b/examples/age_persist_demo.rs @@ -14,25 +14,31 @@ fn read_reply(s: &mut TcpStream) -> String { let n = s.read(&mut buf).unwrap(); String::from_utf8_lossy(&buf[..n]).to_string() } -fn parse_two_bulk(reply: &str) -> Option<(String,String)> { +fn parse_two_bulk(reply: &str) -> Option<(String, String)> { let mut lines = reply.split("\r\n"); - if lines.next()? != "*2" { return None; } + if lines.next()? 
!= "*2" { + return None; + } let _n = lines.next()?; let a = lines.next()?.to_string(); let _m = lines.next()?; let b = lines.next()?.to_string(); - Some((a,b)) + Some((a, b)) } fn parse_bulk(reply: &str) -> Option { let mut lines = reply.split("\r\n"); let hdr = lines.next()?; - if !hdr.starts_with('$') { return None; } + if !hdr.starts_with('$') { + return None; + } Some(lines.next()?.to_string()) } fn parse_simple(reply: &str) -> Option { let mut lines = reply.split("\r\n"); let hdr = lines.next()?; - if !hdr.starts_with('+') { return None; } + if !hdr.starts_with('+') { + return None; + } Some(hdr[1..].to_string()) } @@ -45,39 +51,45 @@ fn main() { let mut s = TcpStream::connect(addr).expect("connect"); // Generate & persist X25519 enc keys under name "alice" - s.write_all(arr(&["age","keygen","alice"]).as_bytes()).unwrap(); + s.write_all(arr(&["age", "keygen", "alice"]).as_bytes()) + .unwrap(); let (_alice_recip, _alice_ident) = parse_two_bulk(&read_reply(&mut s)).expect("gen enc"); // Generate & persist Ed25519 signing key under name "signer" - s.write_all(arr(&["age","signkeygen","signer"]).as_bytes()).unwrap(); + s.write_all(arr(&["age", "signkeygen", "signer"]).as_bytes()) + .unwrap(); let (_verify, _secret) = parse_two_bulk(&read_reply(&mut s)).expect("gen sign"); // Encrypt by name let msg = "hello from persistent keys"; - s.write_all(arr(&["age","encryptname","alice", msg]).as_bytes()).unwrap(); + s.write_all(arr(&["age", "encryptname", "alice", msg]).as_bytes()) + .unwrap(); let ct_b64 = parse_bulk(&read_reply(&mut s)).expect("ct b64"); println!("ciphertext b64: {}", ct_b64); // Decrypt by name - s.write_all(arr(&["age","decryptname","alice", &ct_b64]).as_bytes()).unwrap(); + s.write_all(arr(&["age", "decryptname", "alice", &ct_b64]).as_bytes()) + .unwrap(); let pt = parse_bulk(&read_reply(&mut s)).expect("pt"); assert_eq!(pt, msg); println!("decrypted ok"); // Sign by name - s.write_all(arr(&["age","signname","signer", msg]).as_bytes()).unwrap(); + s.write_all(arr(&["age", "signname", "signer", msg]).as_bytes()) + .unwrap(); let sig_b64 = parse_bulk(&read_reply(&mut s)).expect("sig b64"); // Verify by name - s.write_all(arr(&["age","verifyname","signer", msg, &sig_b64]).as_bytes()).unwrap(); + s.write_all(arr(&["age", "verifyname", "signer", msg, &sig_b64]).as_bytes()) + .unwrap(); let ok = parse_simple(&read_reply(&mut s)).expect("verify"); assert_eq!(ok, "1"); println!("signature verified"); // List names - s.write_all(arr(&["age","list"]).as_bytes()).unwrap(); + s.write_all(arr(&["age", "list"]).as_bytes()).unwrap(); let list = read_reply(&mut s); println!("LIST -> {list}"); println!("✔ persistent AGE workflow complete."); -} \ No newline at end of file +} diff --git a/examples/tantivy_search_demo.sh b/examples/tantivy_search_demo.sh new file mode 100755 index 0000000..692bcd5 --- /dev/null +++ b/examples/tantivy_search_demo.sh @@ -0,0 +1,239 @@ +#!/bin/bash + +# HeroDB Tantivy Search Demo +# This script demonstrates full-text search capabilities using Redis commands +# HeroDB server should be running on port 6381 + +set -e # Exit on any error + +# Configuration +REDIS_HOST="localhost" +REDIS_PORT="6382" +REDIS_CLI="redis-cli -h $REDIS_HOST -p $REDIS_PORT" + +# Start the herodb server in the background +echo "Starting herodb server..." +cargo run -p herodb -- --dir /tmp/herodbtest --port ${REDIS_PORT} --debug & +SERVER_PID=$! 
+echo +sleep 2 # Give the server a moment to start + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +BLUE='\033[0;34m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# Function to print colored output +print_header() { + echo -e "${BLUE}=== $1 ===${NC}" +} + +print_success() { + echo -e "${GREEN}✓ $1${NC}" +} + +print_info() { + echo -e "${YELLOW}ℹ $1${NC}" +} + +print_error() { + echo -e "${RED}✗ $1${NC}" +} + +# Function to check if HeroDB is running +check_herodb() { + print_info "Checking if HeroDB is running on port $REDIS_PORT..." + if ! $REDIS_CLI ping > /dev/null 2>&1; then + print_error "HeroDB is not running on port $REDIS_PORT" + print_info "Please start HeroDB with: cargo run -- --port $REDIS_PORT" + exit 1 + fi + print_success "HeroDB is running and responding" +} + +# Function to execute Redis command with error handling +execute_cmd() { + local description="${@: -1}" + set -- "${@:1:$(($#-1))}" + + echo -e "${YELLOW}Command:${NC} $(printf '%q ' "$@")" + if result=$($REDIS_CLI "$@" 2>&1); then + echo -e "${GREEN}Result:${NC} $result" + return 0 + else + print_error "Failed: $description" + echo "Error: $result" + return 1 + fi +} + +# Function to pause for readability +pause() { + echo + read -p "Press Enter to continue..." + echo +} + +# Main demo function +main() { + clear + print_header "HeroDB Tantivy Search Demonstration" + echo "This demo shows full-text search capabilities using Redis commands" + echo "HeroDB runs on port $REDIS_PORT (instead of Redis default 6379)" + echo + + # Check if HeroDB is running + check_herodb + echo + + print_header "Step 1: Create Search Index" + print_info "Creating a product catalog search index with various field types" + + # Create search index with schema + execute_cmd FT.CREATE product_catalog SCHEMA title TEXT description TEXT category TAG price NUMERIC rating NUMERIC location GEO \ + "Creating search index" + + print_success "Search index 'product_catalog' created successfully" + pause + + print_header "Step 2: Add Sample Products" + print_info "Adding sample products to demonstrate different search scenarios" + + # Add sample products using FT.ADD + execute_cmd FT.ADD product_catalog product:1 1.0 title 'Wireless Bluetooth Headphones' description 'Premium noise-canceling headphones with 30-hour battery life' category 'electronics,audio' price 299.99 rating 4.5 location '-122.4194,37.7749' "Adding product 1" + execute_cmd FT.ADD product_catalog product:2 1.0 title 'Organic Coffee Beans' description 'Single-origin Ethiopian coffee beans, medium roast' category 'food,beverages,organic' price 24.99 rating 4.8 location '-74.0060,40.7128' "Adding product 2" + execute_cmd FT.ADD product_catalog product:3 1.0 title 'Yoga Mat Premium' description 'Eco-friendly yoga mat with superior grip and cushioning' category 'fitness,wellness,eco-friendly' price 89.99 rating 4.3 location '-118.2437,34.0522' "Adding product 3" + execute_cmd FT.ADD product_catalog product:4 1.0 title 'Smart Home Speaker' description 'Voice-controlled smart speaker with AI assistant' category 'electronics,smart-home' price 149.99 rating 4.2 location '-87.6298,41.8781' "Adding product 4" + execute_cmd FT.ADD product_catalog product:5 1.0 title 'Organic Green Tea' description 'Premium organic green tea leaves from Japan' category 'food,beverages,organic,tea' price 18.99 rating 4.7 location '139.6503,35.6762' "Adding product 5" + execute_cmd FT.ADD product_catalog product:6 1.0 title 'Wireless Gaming Mouse' description 'High-precision gaming mouse with RGB 
lighting' category 'electronics,gaming' price 79.99 rating 4.4 location '-122.3321,47.6062' "Adding product 6" + execute_cmd FT.ADD product_catalog product:7 1.0 title 'Comfortable meditation cushion for mindfulness practice' description 'Meditation cushion with premium materials' category 'wellness,meditation' price 45.99 rating 4.6 location '-122.4194,37.7749' "Adding product 7" + execute_cmd FT.ADD product_catalog product:8 1.0 title 'Bluetooth Earbuds' description 'True wireless earbuds with active noise cancellation' category 'electronics,audio' price 199.99 rating 4.1 location '-74.0060,40.7128' "Adding product 8" + + print_success "Added 8 products to the index" + pause + + print_header "Step 3: Basic Text Search" + print_info "Searching for 'wireless' products" + + execute_cmd FT.SEARCH product_catalog wireless "Basic text search" + pause + + print_header "Step 4: Search with Filters" + print_info "Searching for 'organic' products" + + execute_cmd FT.SEARCH product_catalog organic "Filtered search" + pause + + print_header "Step 5: Numeric Range Search" + print_info "Searching for 'premium' products" + + execute_cmd FT.SEARCH product_catalog premium "Text search" + pause + + print_header "Step 6: Sorting Results" + print_info "Searching for electronics" + + execute_cmd FT.SEARCH product_catalog electronics "Category search" + pause + + print_header "Step 7: Limiting Results" + print_info "Searching for wireless products with limit" + + execute_cmd FT.SEARCH product_catalog wireless LIMIT 0 3 "Limited results" + pause + + print_header "Step 8: Complex Query" + print_info "Finding audio products with noise cancellation" + + execute_cmd FT.SEARCH product_catalog 'noise cancellation' "Complex query" + pause + + print_header "Step 9: Geographic Search" + print_info "Searching for meditation products" + + execute_cmd FT.SEARCH product_catalog meditation "Text search" + pause + + print_header "Step 10: Aggregation Example" + print_info "Getting index information and statistics" + + execute_cmd FT.INFO product_catalog "Index information" + pause + + print_header "Step 11: Search Comparison" + print_info "Comparing Tantivy search vs simple key matching" + + echo -e "${YELLOW}Tantivy Full-Text Search:${NC}" + execute_cmd FT.SEARCH product_catalog 'battery life' "Full-text search for 'battery life'" + + echo + echo -e "${YELLOW}Simple Key Pattern Matching:${NC}" + execute_cmd KEYS *battery* "Simple pattern matching for 'battery'" + + print_info "Notice how full-text search finds relevant results even when exact words don't match keys" + pause + + print_header "Step 12: Fuzzy Search" + print_info "Searching for headphones" + + execute_cmd FT.SEARCH product_catalog headphones "Text search" + pause + + print_header "Step 13: Phrase Search" + print_info "Searching for coffee products" + + execute_cmd FT.SEARCH product_catalog coffee "Text search" + pause + + print_header "Step 14: Boolean Queries" + print_info "Searching for gaming products" + + execute_cmd FT.SEARCH product_catalog gaming "Text search" + echo + execute_cmd FT.SEARCH product_catalog tea "Text search" + pause + + print_header "Step 15: Cleanup" + print_info "Removing test data" + + # Delete the search index + execute_cmd FT.DROP product_catalog "Dropping search index" + + # Clean up documents from search index + for i in {1..8}; do + execute_cmd FT.DEL product_catalog product:$i "Deleting product:$i from index" + done + + print_success "Cleanup completed" + echo + + print_header "Demo Summary" + echo "This demonstration showed:" + 
echo "• Creating search indexes with different field types" + echo "• Adding documents to the search index" + echo "• Basic and advanced text search queries" + echo "• Filtering by categories and numeric ranges" + echo "• Sorting and limiting results" + echo "• Geographic searches" + echo "• Fuzzy matching and phrase searches" + echo "• Boolean query operators" + echo "• Comparison with simple pattern matching" + echo + print_success "HeroDB Tantivy search demo completed successfully!" + echo + print_info "Key advantages of Tantivy full-text search:" + echo " - Relevance scoring and ranking" + echo " - Fuzzy matching and typo tolerance" + echo " - Complex boolean queries" + echo " - Field-specific searches and filters" + echo " - Geographic and numeric range queries" + echo " - Much faster than pattern matching on large datasets" + echo + print_info "To run HeroDB server: cargo run -- --port 6381" + print_info "To connect with redis-cli: redis-cli -h localhost -p 6381" +} + +# Run the demo +main "$@" diff --git a/examples/test_tantivy_integration.sh b/examples/test_tantivy_integration.sh new file mode 100755 index 0000000..a39071b --- /dev/null +++ b/examples/test_tantivy_integration.sh @@ -0,0 +1,101 @@ +#!/bin/bash + +# Simple Tantivy Search Integration Test for HeroDB +# This script tests the full-text search functionality we just integrated + +set -e + +echo "🔍 Testing Tantivy Search Integration..." + +# Build the project first +echo "📦 Building HeroDB..." +cargo build --release + +# Start the server in the background +echo "🚀 Starting HeroDB server on port 6379..." +cargo run --release -- --port 6379 --dir ./test_data & +SERVER_PID=$! + +# Wait for server to start +sleep 3 + +# Function to cleanup on exit +cleanup() { + echo "🧹 Cleaning up..." + kill $SERVER_PID 2>/dev/null || true + rm -rf ./test_data + exit +} + +# Set trap for cleanup +trap cleanup EXIT INT TERM + +# Function to execute Redis command +execute_cmd() { + local cmd="$1" + local description="$2" + + echo "📝 $description" + echo " Command: $cmd" + + if result=$(redis-cli -p 6379 $cmd 2>&1); then + echo " ✅ Result: $result" + echo + return 0 + else + echo " ❌ Failed: $result" + echo + return 1 + fi +} + +echo "🧪 Running Tantivy Search Tests..." +echo + +# Test 1: Create a search index +execute_cmd "ft.create books SCHEMA title TEXT description TEXT author TEXT category TAG price NUMERIC" \ + "Creating search index 'books'" + +# Test 2: Add documents to the index +execute_cmd "ft.add books book1 1.0 title \"The Great Gatsby\" description \"A classic American novel about the Jazz Age\" author \"F. 
Scott Fitzgerald\" category \"fiction,classic\" price \"12.99\"" \ + "Adding first book" + +execute_cmd "ft.add books book2 1.0 title \"To Kill a Mockingbird\" description \"A novel about racial injustice in the American South\" author \"Harper Lee\" category \"fiction,classic\" price \"14.99\"" \ + "Adding second book" + +execute_cmd "ft.add books book3 1.0 title \"Programming Rust\" description \"A comprehensive guide to Rust programming language\" author \"Jim Blandy\" category \"programming,technical\" price \"49.99\"" \ + "Adding third book" + +execute_cmd "ft.add books book4 1.0 title \"The Rust Programming Language\" description \"The official book on Rust programming\" author \"Steve Klabnik\" category \"programming,technical\" price \"39.99\"" \ + "Adding fourth book" + +# Test 3: Basic search +execute_cmd "ft.search books Rust" \ + "Searching for 'Rust'" + +# Test 4: Search with filters +execute_cmd "ft.search books programming FILTER category programming" \ + "Searching for 'programming' with category filter" + +# Test 5: Search with limit +execute_cmd "ft.search books \"*\" LIMIT 0 2" \ + "Getting first 2 documents" + +# Test 6: Get index info +execute_cmd "ft.info books" \ + "Getting index information" + +# Test 7: Delete a document +execute_cmd "ft.del books book1" \ + "Deleting book1" + +# Test 8: Search again to verify deletion +execute_cmd "ft.search books Gatsby" \ + "Searching for deleted book" + +# Test 9: Drop the index +execute_cmd "ft.drop books" \ + "Dropping the index" + +echo "🎉 All tests completed successfully!" +echo "✅ Tantivy search integration is working correctly" \ No newline at end of file diff --git a/src/age.rs b/src/age.rs index 77501da..3f334e5 100644 --- a/src/age.rs +++ b/src/age.rs @@ -12,17 +12,17 @@ use std::str::FromStr; -use secrecy::ExposeSecret; -use age::{Decryptor, Encryptor}; use age::x25519; +use age::{Decryptor, Encryptor}; +use secrecy::ExposeSecret; -use ed25519_dalek::{Signature, Signer, Verifier, SigningKey, VerifyingKey}; +use ed25519_dalek::{Signature, Signer, SigningKey, Verifier, VerifyingKey}; use base64::{engine::general_purpose::STANDARD as B64, Engine as _}; +use crate::error::DBError; use crate::protocol::Protocol; use crate::server::Server; -use crate::error::DBError; // ---------- Internal helpers ---------- @@ -32,7 +32,7 @@ pub enum AgeWireError { Crypto(String), Utf8, SignatureLen, - NotFound(&'static str), // which kind of key was missing + NotFound(&'static str), // which kind of key was missing Storage(String), } @@ -83,34 +83,38 @@ pub fn gen_enc_keypair() -> (String, String) { } pub fn gen_sign_keypair() -> (String, String) { - use rand::RngCore; use rand::rngs::OsRng; - + use rand::RngCore; + // Generate random 32 bytes for the signing key let mut secret_bytes = [0u8; 32]; OsRng.fill_bytes(&mut secret_bytes); - + let signing_key = SigningKey::from_bytes(&secret_bytes); let verifying_key = signing_key.verifying_key(); - + // Encode as base64 for storage let signing_key_b64 = B64.encode(signing_key.to_bytes()); let verifying_key_b64 = B64.encode(verifying_key.to_bytes()); - + (verifying_key_b64, signing_key_b64) // (verify_pub, signing_secret) } /// Encrypt `msg` for `recipient_str` (X25519). Returns base64(ciphertext). 
pub fn encrypt_b64(recipient_str: &str, msg: &str) -> Result<String, AgeWireError> { let recipient = parse_recipient(recipient_str)?; - let enc = Encryptor::with_recipients(vec![Box::new(recipient)]) - .expect("failed to create encryptor"); // Handle Option + let enc = + Encryptor::with_recipients(vec![Box::new(recipient)]).expect("failed to create encryptor"); // Handle Option let mut out = Vec::new(); { use std::io::Write; - let mut w = enc.wrap_output(&mut out).map_err(|e| AgeWireError::Crypto(e.to_string()))?; - w.write_all(msg.as_bytes()).map_err(|e| AgeWireError::Crypto(e.to_string()))?; - w.finish().map_err(|e| AgeWireError::Crypto(e.to_string()))?; + let mut w = enc + .wrap_output(&mut out) + .map_err(|e| AgeWireError::Crypto(e.to_string()))?; + w.write_all(msg.as_bytes()) + .map_err(|e| AgeWireError::Crypto(e.to_string()))?; + w.finish() + .map_err(|e| AgeWireError::Crypto(e.to_string()))?; } Ok(B64.encode(out)) } @@ -118,19 +122,27 @@ pub fn encrypt_b64(recipient_str: &str, msg: &str) -> Result<String, AgeWireError> pub fn decrypt_b64(identity_str: &str, ct_b64: &str) -> Result<String, AgeWireError> { let id = parse_identity(identity_str)?; - let ct = B64.decode(ct_b64.as_bytes()).map_err(|e| AgeWireError::Crypto(e.to_string()))?; + let ct = B64 + .decode(ct_b64.as_bytes()) + .map_err(|e| AgeWireError::Crypto(e.to_string()))?; let dec = Decryptor::new(&ct[..]).map_err(|e| AgeWireError::Crypto(e.to_string()))?; - + // The decrypt method returns a Result let mut r = match dec { - Decryptor::Recipients(d) => d.decrypt(std::iter::once(&id as &dyn age::Identity)) + Decryptor::Recipients(d) => d + .decrypt(std::iter::once(&id as &dyn age::Identity)) .map_err(|e| AgeWireError::Crypto(e.to_string()))?, - Decryptor::Passphrase(_) => return Err(AgeWireError::Crypto("Expected recipients, got passphrase".to_string())), + Decryptor::Passphrase(_) => { + return Err(AgeWireError::Crypto( + "Expected recipients, got passphrase".to_string(), + )) + } }; - + let mut pt = Vec::new(); use std::io::Read; - r.read_to_end(&mut pt).map_err(|e| AgeWireError::Crypto(e.to_string()))?; + r.read_to_end(&mut pt) + .map_err(|e| AgeWireError::Crypto(e.to_string()))?; String::from_utf8(pt).map_err(|_| AgeWireError::Utf8) } @@ -144,7 +156,9 @@ pub fn sign_b64(signing_secret_str: &str, msg: &str) -> Result<String, AgeWireError> pub fn verify_b64(verify_pub_str: &str, msg: &str, sig_b64: &str) -> Result<bool, AgeWireError> { let verifying_key = parse_ed25519_verifying_key(verify_pub_str)?; - let sig_bytes = B64.decode(sig_b64.as_bytes()).map_err(|e| AgeWireError::Crypto(e.to_string()))?; + let sig_bytes = B64 + .decode(sig_b64.as_bytes()) + .map_err(|e| AgeWireError::Crypto(e.to_string()))?; if sig_bytes.len() != 64 { return Err(AgeWireError::SignatureLen); } @@ -155,30 +169,49 @@ pub fn verify_b64(verify_pub_str: &str, msg: &str, sig_b64: &str) -> Result<bool, AgeWireError> fn sget(server: &Server, key: &str) -> Result<Option<String>, AgeWireError> { - let st = server.current_storage().map_err(|e| AgeWireError::Storage(e.0))?; + let st = server + .current_storage() + .map_err(|e| AgeWireError::Storage(e.0))?; st.get(key).map_err(|e| AgeWireError::Storage(e.0)) } fn sset(server: &Server, key: &str, val: &str) -> Result<(), AgeWireError> { - let st = server.current_storage().map_err(|e| AgeWireError::Storage(e.0))?; - st.set(key.to_string(), val.to_string()).map_err(|e| AgeWireError::Storage(e.0)) + let st = server + .current_storage() + .map_err(|e| AgeWireError::Storage(e.0))?; + st.set(key.to_string(), val.to_string()) + .map_err(|e| AgeWireError::Storage(e.0)) } -fn enc_pub_key_key(name: &str) -> String { format!("age:key:{name}") } -fn enc_priv_key_key(name: &str) -> String { format!("age:privkey:{name}") } -fn sign_pub_key_key(name: &str) -> String { format!("age:signpub:{name}") } -fn sign_priv_key_key(name:
&str) -> String { format!("age:signpriv:{name}") } +fn enc_pub_key_key(name: &str) -> String { + format!("age:key:{name}") +} +fn enc_priv_key_key(name: &str) -> String { + format!("age:privkey:{name}") +} +fn sign_pub_key_key(name: &str) -> String { + format!("age:signpub:{name}") +} +fn sign_priv_key_key(name: &str) -> String { + format!("age:signpriv:{name}") +} // ---------- Command handlers (RESP Protocol) ---------- // Basic (stateless) ones kept for completeness pub async fn cmd_age_genenc() -> Protocol { let (recip, ident) = gen_enc_keypair(); - Protocol::Array(vec![Protocol::BulkString(recip), Protocol::BulkString(ident)]) + Protocol::Array(vec![ + Protocol::BulkString(recip), + Protocol::BulkString(ident), + ]) } pub async fn cmd_age_gensign() -> Protocol { let (verify, secret) = gen_sign_keypair(); - Protocol::Array(vec![Protocol::BulkString(verify), Protocol::BulkString(secret)]) + Protocol::Array(vec![ + Protocol::BulkString(verify), + Protocol::BulkString(secret), + ]) } pub async fn cmd_age_encrypt(recipient: &str, message: &str) -> Protocol { @@ -214,16 +247,30 @@ pub async fn cmd_age_verify(verify_pub: &str, message: &str, sig_b64: &str) -> P pub async fn cmd_age_keygen(server: &Server, name: &str) -> Protocol { let (recip, ident) = gen_enc_keypair(); - if let Err(e) = sset(server, &enc_pub_key_key(name), &recip) { return e.to_protocol(); } - if let Err(e) = sset(server, &enc_priv_key_key(name), &ident) { return e.to_protocol(); } - Protocol::Array(vec![Protocol::BulkString(recip), Protocol::BulkString(ident)]) + if let Err(e) = sset(server, &enc_pub_key_key(name), &recip) { + return e.to_protocol(); + } + if let Err(e) = sset(server, &enc_priv_key_key(name), &ident) { + return e.to_protocol(); + } + Protocol::Array(vec![ + Protocol::BulkString(recip), + Protocol::BulkString(ident), + ]) } pub async fn cmd_age_signkeygen(server: &Server, name: &str) -> Protocol { let (verify, secret) = gen_sign_keypair(); - if let Err(e) = sset(server, &sign_pub_key_key(name), &verify) { return e.to_protocol(); } - if let Err(e) = sset(server, &sign_priv_key_key(name), &secret) { return e.to_protocol(); } - Protocol::Array(vec![Protocol::BulkString(verify), Protocol::BulkString(secret)]) + if let Err(e) = sset(server, &sign_pub_key_key(name), &verify) { + return e.to_protocol(); + } + if let Err(e) = sset(server, &sign_priv_key_key(name), &secret) { + return e.to_protocol(); + } + Protocol::Array(vec![ + Protocol::BulkString(verify), + Protocol::BulkString(secret), + ]) } pub async fn cmd_age_encrypt_name(server: &Server, name: &str, message: &str) -> Protocol { @@ -253,7 +300,9 @@ pub async fn cmd_age_decrypt_name(server: &Server, name: &str, ct_b64: &str) -> pub async fn cmd_age_sign_name(server: &Server, name: &str, message: &str) -> Protocol { let sec = match sget(server, &sign_priv_key_key(name)) { Ok(Some(v)) => v, - Ok(None) => return AgeWireError::NotFound("signing secret (age:signpriv:{name})").to_protocol(), + Ok(None) => { + return AgeWireError::NotFound("signing secret (age:signpriv:{name})").to_protocol() + } Err(e) => return e.to_protocol(), }; match sign_b64(&sec, message) { @@ -262,10 +311,17 @@ pub async fn cmd_age_sign_name(server: &Server, name: &str, message: &str) -> Pr } } -pub async fn cmd_age_verify_name(server: &Server, name: &str, message: &str, sig_b64: &str) -> Protocol { +pub async fn cmd_age_verify_name( + server: &Server, + name: &str, + message: &str, + sig_b64: &str, +) -> Protocol { let pubk = match sget(server, &sign_pub_key_key(name)) { Ok(Some(v)) => v, 
- Ok(None) => return AgeWireError::NotFound("verify pubkey (age:signpub:{name})").to_protocol(), + Ok(None) => { + return AgeWireError::NotFound("verify pubkey (age:signpub:{name})").to_protocol() + } Err(e) => return e.to_protocol(), }; match verify_b64(&pubk, message, sig_b64) { @@ -277,25 +333,43 @@ pub async fn cmd_age_verify_name(server: &Server, name: &str, message: &str, sig pub async fn cmd_age_list(server: &Server) -> Protocol { // Returns 4 arrays: ["encpub", ], ["encpriv", ...], ["signpub", ...], ["signpriv", ...] - let st = match server.current_storage() { Ok(s) => s, Err(e) => return Protocol::err(&e.0) }; + let st = match server.current_storage() { + Ok(s) => s, + Err(e) => return Protocol::err(&e.0), + }; let pull = |pat: &str, prefix: &str| -> Result, DBError> { let keys = st.keys(pat)?; - let mut names: Vec = keys.into_iter() + let mut names: Vec = keys + .into_iter() .filter_map(|k| k.strip_prefix(prefix).map(|x| x.to_string())) .collect(); names.sort(); Ok(names) }; - let encpub = match pull("age:key:*", "age:key:") { Ok(v) => v, Err(e)=> return Protocol::err(&e.0) }; - let encpriv = match pull("age:privkey:*", "age:privkey:") { Ok(v) => v, Err(e)=> return Protocol::err(&e.0) }; - let signpub = match pull("age:signpub:*", "age:signpub:") { Ok(v) => v, Err(e)=> return Protocol::err(&e.0) }; - let signpriv= match pull("age:signpriv:*", "age:signpriv:") { Ok(v) => v, Err(e)=> return Protocol::err(&e.0) }; + let encpub = match pull("age:key:*", "age:key:") { + Ok(v) => v, + Err(e) => return Protocol::err(&e.0), + }; + let encpriv = match pull("age:privkey:*", "age:privkey:") { + Ok(v) => v, + Err(e) => return Protocol::err(&e.0), + }; + let signpub = match pull("age:signpub:*", "age:signpub:") { + Ok(v) => v, + Err(e) => return Protocol::err(&e.0), + }; + let signpriv = match pull("age:signpriv:*", "age:signpriv:") { + Ok(v) => v, + Err(e) => return Protocol::err(&e.0), + }; let to_arr = |label: &str, v: Vec| { let mut out = vec![Protocol::BulkString(label.to_string())]; - out.push(Protocol::Array(v.into_iter().map(Protocol::BulkString).collect())); + out.push(Protocol::Array( + v.into_iter().map(Protocol::BulkString).collect(), + )); Protocol::Array(out) }; @@ -305,4 +379,4 @@ pub async fn cmd_age_list(server: &Server) -> Protocol { to_arr("signpub", signpub), to_arr("signpriv", signpriv), ]) -} \ No newline at end of file +} diff --git a/src/cmd.rs b/src/cmd.rs index 176ed2f..3506347 100644 --- a/src/cmd.rs +++ b/src/cmd.rs @@ -1,6 +1,7 @@ -use crate::{error::DBError, protocol::Protocol, server::Server}; -use tokio::time::{timeout, Duration}; +use crate::{error::DBError, protocol::Protocol, search_cmd, server::Server}; use futures::future::select_all; +use std::collections::HashMap; +use tokio::time::{timeout, Duration}; #[derive(Debug, Clone)] pub enum Cmd { @@ -15,7 +16,7 @@ pub enum Cmd { SetOpts(String, String, Option, bool, bool, bool), MGet(Vec), MSet(Vec<(String, String)>), - Keys, + Keys(String), DbSize, ConfigGet(String), Info(Option), @@ -39,7 +40,7 @@ pub enum Cmd { HIncrBy(String, String, i64), HIncrByFloat(String, String, f64), HScan(String, u64, Option, Option), // key, cursor, pattern, count - Scan(u64, Option, Option), // cursor, pattern, count + Scan(u64, Option, Option), // cursor, pattern, count Ttl(String), Expire(String, i64), PExpire(String, i64), @@ -71,10 +72,10 @@ pub enum Cmd { // AGE (rage) commands — stateless AgeGenEnc, AgeGenSign, - AgeEncrypt(String, String), // recipient, message - AgeDecrypt(String, String), // identity, ciphertext_b64 - 
AgeSign(String, String), // signing_secret, message - AgeVerify(String, String, String), // verify_pub, message, signature_b64 + AgeEncrypt(String, String), // recipient, message + AgeDecrypt(String, String), // identity, ciphertext_b64 + AgeSign(String, String), // signing_secret, message + AgeVerify(String, String, String), // verify_pub, message, signature_b64 // NEW: persistent named-key commands AgeKeygen(String), // name @@ -84,6 +85,41 @@ pub enum Cmd { AgeSignName(String, String), // name, message AgeVerifyName(String, String, String), // name, message, signature_b64 AgeList, + + // Full-text search commands with schema support + FtCreate { + index_name: String, + schema: Vec<(String, String, Vec)>, // (field_name, field_type, options) + }, + FtAdd { + index_name: String, + doc_id: String, + score: f64, + fields: std::collections::HashMap, + }, + FtSearch { + index_name: String, + query: String, + filters: Vec<(String, String)>, // field, value pairs + limit: Option, + offset: Option, + return_fields: Option>, + }, + FtDel(String, String), // index_name, doc_id + FtInfo(String), // index_name + FtDrop(String), // index_name + FtAlter { + index_name: String, + field_name: String, + field_type: String, + options: Vec, + }, + FtAggregate { + index_name: String, + query: String, + group_by: Vec, + reducers: Vec, + }, } impl Cmd { @@ -99,9 +135,13 @@ impl Cmd { match cmd[0].to_lowercase().as_str() { "select" => { if cmd.len() != 2 { - return Err(DBError("wrong number of arguments for SELECT".to_string())); + return Err(DBError( + "wrong number of arguments for SELECT".to_string(), + )); } - let idx = cmd[1].parse::().map_err(|_| DBError("ERR DB index is not an integer".to_string()))?; + let idx = cmd[1].parse::().map_err(|_| { + DBError("ERR DB index is not an integer".to_string()) + })?; Cmd::Select(idx) } "echo" => Cmd::Echo(cmd[1].clone()), @@ -109,7 +149,9 @@ impl Cmd { "get" => Cmd::Get(cmd[1].clone()), "set" => { if cmd.len() < 3 { - return Err(DBError("wrong number of arguments for SET".to_string())); + return Err(DBError( + "wrong number of arguments for SET".to_string(), + )); } let key = cmd[1].clone(); let val = cmd[2].clone(); @@ -127,7 +169,12 @@ impl Cmd { if i + 1 >= cmd.len() { return Err(DBError("ERR syntax error".to_string())); } - let secs: u128 = cmd[i + 1].parse().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?; + let secs: u128 = cmd[i + 1].parse().map_err(|_| { + DBError( + "ERR value is not an integer or out of range" + .to_string(), + ) + })?; ex_ms = Some(secs * 1000); i += 2; } @@ -135,13 +182,27 @@ impl Cmd { if i + 1 >= cmd.len() { return Err(DBError("ERR syntax error".to_string())); } - let ms: u128 = cmd[i + 1].parse().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?; + let ms: u128 = cmd[i + 1].parse().map_err(|_| { + DBError( + "ERR value is not an integer or out of range" + .to_string(), + ) + })?; ex_ms = Some(ms); i += 2; } - "nx" => { nx = true; i += 1; } - "xx" => { xx = true; i += 1; } - "get" => { getflag = true; i += 1; } + "nx" => { + nx = true; + i += 1; + } + "xx" => { + xx = true; + i += 1; + } + "get" => { + getflag = true; + i += 1; + } _ => { return Err(DBError(format!("unsupported cmd {:?}", cmd))); } @@ -157,19 +218,25 @@ impl Cmd { } "setex" => { if cmd.len() != 4 { - return Err(DBError(format!("wrong number of arguments for SETEX command"))); + return Err(DBError(format!( + "wrong number of arguments for SETEX command" + ))); } Cmd::SetEx(cmd[1].clone(), cmd[3].clone(), 
cmd[2].parse().unwrap()) } "mget" => { if cmd.len() < 2 { - return Err(DBError("wrong number of arguments for MGET command".to_string())); + return Err(DBError( + "wrong number of arguments for MGET command".to_string(), + )); } Cmd::MGet(cmd[1..].to_vec()) } "mset" => { if cmd.len() < 3 || ((cmd.len() - 1) % 2 != 0) { - return Err(DBError("wrong number of arguments for MSET command".to_string())); + return Err(DBError( + "wrong number of arguments for MSET command".to_string(), + )); } let mut pairs = Vec::new(); let mut i = 1; @@ -187,15 +254,17 @@ impl Cmd { } } "keys" => { - if cmd.len() != 2 || cmd[1] != "*" { + if cmd.len() != 2 { return Err(DBError(format!("unsupported cmd {:?}", cmd))); } else { - Cmd::Keys + Cmd::Keys(cmd[1].clone()) } } "dbsize" => { if cmd.len() != 1 { - return Err(DBError(format!("wrong number of arguments for DBSIZE command"))); + return Err(DBError(format!( + "wrong number of arguments for DBSIZE command" + ))); } Cmd::DbSize } @@ -209,7 +278,9 @@ impl Cmd { } "del" => { if cmd.len() < 2 { - return Err(DBError(format!("wrong number of arguments for DEL command"))); + return Err(DBError(format!( + "wrong number of arguments for DEL command" + ))); } if cmd.len() == 2 { Cmd::Del(cmd[1].clone()) @@ -245,7 +316,9 @@ impl Cmd { // Hash commands "hset" => { if cmd.len() < 4 || (cmd.len() - 2) % 2 != 0 { - return Err(DBError(format!("wrong number of arguments for HSET command"))); + return Err(DBError(format!( + "wrong number of arguments for HSET command" + ))); } let mut pairs = Vec::new(); let mut i = 2; @@ -257,85 +330,114 @@ impl Cmd { } "hget" => { if cmd.len() != 3 { - return Err(DBError(format!("wrong number of arguments for HGET command"))); + return Err(DBError(format!( + "wrong number of arguments for HGET command" + ))); } Cmd::HGet(cmd[1].clone(), cmd[2].clone()) } "hgetall" => { if cmd.len() != 2 { - return Err(DBError(format!("wrong number of arguments for HGETALL command"))); + return Err(DBError(format!( + "wrong number of arguments for HGETALL command" + ))); } Cmd::HGetAll(cmd[1].clone()) } "hdel" => { if cmd.len() < 3 { - return Err(DBError(format!("wrong number of arguments for HDEL command"))); + return Err(DBError(format!( + "wrong number of arguments for HDEL command" + ))); } Cmd::HDel(cmd[1].clone(), cmd[2..].to_vec()) } "hexists" => { if cmd.len() != 3 { - return Err(DBError(format!("wrong number of arguments for HEXISTS command"))); + return Err(DBError(format!( + "wrong number of arguments for HEXISTS command" + ))); } Cmd::HExists(cmd[1].clone(), cmd[2].clone()) } "hkeys" => { if cmd.len() != 2 { - return Err(DBError(format!("wrong number of arguments for HKEYS command"))); + return Err(DBError(format!( + "wrong number of arguments for HKEYS command" + ))); } Cmd::HKeys(cmd[1].clone()) } "hvals" => { if cmd.len() != 2 { - return Err(DBError(format!("wrong number of arguments for HVALS command"))); + return Err(DBError(format!( + "wrong number of arguments for HVALS command" + ))); } Cmd::HVals(cmd[1].clone()) } "hlen" => { if cmd.len() != 2 { - return Err(DBError(format!("wrong number of arguments for HLEN command"))); + return Err(DBError(format!( + "wrong number of arguments for HLEN command" + ))); } Cmd::HLen(cmd[1].clone()) } "hmget" => { if cmd.len() < 3 { - return Err(DBError(format!("wrong number of arguments for HMGET command"))); + return Err(DBError(format!( + "wrong number of arguments for HMGET command" + ))); } Cmd::HMGet(cmd[1].clone(), cmd[2..].to_vec()) } "hsetnx" => { if cmd.len() != 4 { - return 
Err(DBError(format!("wrong number of arguments for HSETNX command"))); + return Err(DBError(format!( + "wrong number of arguments for HSETNX command" + ))); } Cmd::HSetNx(cmd[1].clone(), cmd[2].clone(), cmd[3].clone()) } "hincrby" => { if cmd.len() != 4 { - return Err(DBError(format!("wrong number of arguments for HINCRBY command"))); + return Err(DBError(format!( + "wrong number of arguments for HINCRBY command" + ))); } - let delta = cmd[3].parse::().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?; + let delta = cmd[3].parse::().map_err(|_| { + DBError("ERR value is not an integer or out of range".to_string()) + })?; Cmd::HIncrBy(cmd[1].clone(), cmd[2].clone(), delta) } "hincrbyfloat" => { if cmd.len() != 4 { - return Err(DBError(format!("wrong number of arguments for HINCRBYFLOAT command"))); + return Err(DBError(format!( + "wrong number of arguments for HINCRBYFLOAT command" + ))); } - let delta = cmd[3].parse::().map_err(|_| DBError("ERR value is not a valid float".to_string()))?; + let delta = cmd[3].parse::().map_err(|_| { + DBError("ERR value is not a valid float".to_string()) + })?; Cmd::HIncrByFloat(cmd[1].clone(), cmd[2].clone(), delta) } "hscan" => { if cmd.len() < 3 { - return Err(DBError(format!("wrong number of arguments for HSCAN command"))); + return Err(DBError(format!( + "wrong number of arguments for HSCAN command" + ))); } - + let key = cmd[1].clone(); - let cursor = cmd[2].parse::().map_err(|_| - DBError("ERR invalid cursor".to_string()))?; - + let cursor = cmd[2] + .parse::() + .map_err(|_| DBError("ERR invalid cursor".to_string()))?; + let mut pattern = None; let mut count = None; let mut i = 3; - + while i < cmd.len() { match cmd[i].to_lowercase().as_str() { "match" => { @@ -349,8 +451,12 @@ impl Cmd { if i + 1 >= cmd.len() { return Err(DBError("ERR syntax error".to_string())); } - count = Some(cmd[i + 1].parse::().map_err(|_| - DBError("ERR value is not an integer or out of range".to_string()))?); + count = Some(cmd[i + 1].parse::().map_err(|_| { + DBError( + "ERR value is not an integer or out of range" + .to_string(), + ) + })?); i += 2; } _ => { @@ -358,21 +464,24 @@ impl Cmd { } } } - + Cmd::HScan(key, cursor, pattern, count) } "scan" => { if cmd.len() < 2 { - return Err(DBError(format!("wrong number of arguments for SCAN command"))); + return Err(DBError(format!( + "wrong number of arguments for SCAN command" + ))); } - - let cursor = cmd[1].parse::().map_err(|_| - DBError("ERR invalid cursor".to_string()))?; - + + let cursor = cmd[1] + .parse::() + .map_err(|_| DBError("ERR invalid cursor".to_string()))?; + let mut pattern = None; let mut count = None; let mut i = 2; - + while i < cmd.len() { match cmd[i].to_lowercase().as_str() { "match" => { @@ -386,8 +495,12 @@ impl Cmd { if i + 1 >= cmd.len() { return Err(DBError("ERR syntax error".to_string())); } - count = Some(cmd[i + 1].parse::().map_err(|_| - DBError("ERR value is not an integer or out of range".to_string()))?); + count = Some(cmd[i + 1].parse::().map_err(|_| { + DBError( + "ERR value is not an integer or out of range" + .to_string(), + ) + })?); i += 2; } _ => { @@ -395,52 +508,74 @@ impl Cmd { } } } - + Cmd::Scan(cursor, pattern, count) } "ttl" => { if cmd.len() != 2 { - return Err(DBError(format!("wrong number of arguments for TTL command"))); + return Err(DBError(format!( + "wrong number of arguments for TTL command" + ))); } Cmd::Ttl(cmd[1].clone()) } "expire" => { if cmd.len() != 3 { - return Err(DBError("wrong number of arguments for EXPIRE 
command".to_string())); + return Err(DBError( + "wrong number of arguments for EXPIRE command".to_string(), + )); } - let secs = cmd[2].parse::().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?; + let secs = cmd[2].parse::().map_err(|_| { + DBError("ERR value is not an integer or out of range".to_string()) + })?; Cmd::Expire(cmd[1].clone(), secs) } "pexpire" => { if cmd.len() != 3 { - return Err(DBError("wrong number of arguments for PEXPIRE command".to_string())); + return Err(DBError( + "wrong number of arguments for PEXPIRE command".to_string(), + )); } - let ms = cmd[2].parse::().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?; + let ms = cmd[2].parse::().map_err(|_| { + DBError("ERR value is not an integer or out of range".to_string()) + })?; Cmd::PExpire(cmd[1].clone(), ms) } "expireat" => { if cmd.len() != 3 { - return Err(DBError("wrong number of arguments for EXPIREAT command".to_string())); + return Err(DBError( + "wrong number of arguments for EXPIREAT command".to_string(), + )); } - let ts = cmd[2].parse::().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?; + let ts = cmd[2].parse::().map_err(|_| { + DBError("ERR value is not an integer or out of range".to_string()) + })?; Cmd::ExpireAt(cmd[1].clone(), ts) } "pexpireat" => { if cmd.len() != 3 { - return Err(DBError("wrong number of arguments for PEXPIREAT command".to_string())); + return Err(DBError( + "wrong number of arguments for PEXPIREAT command".to_string(), + )); } - let ts_ms = cmd[2].parse::().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?; + let ts_ms = cmd[2].parse::().map_err(|_| { + DBError("ERR value is not an integer or out of range".to_string()) + })?; Cmd::PExpireAt(cmd[1].clone(), ts_ms) } "persist" => { if cmd.len() != 2 { - return Err(DBError("wrong number of arguments for PERSIST command".to_string())); + return Err(DBError( + "wrong number of arguments for PERSIST command".to_string(), + )); } Cmd::Persist(cmd[1].clone()) } "exists" => { if cmd.len() < 2 { - return Err(DBError(format!("wrong number of arguments for EXISTS command"))); + return Err(DBError(format!( + "wrong number of arguments for EXISTS command" + ))); } if cmd.len() == 2 { Cmd::Exists(cmd[1].clone()) @@ -450,7 +585,9 @@ impl Cmd { } "quit" => { if cmd.len() != 1 { - return Err(DBError(format!("wrong number of arguments for QUIT command"))); + return Err(DBError(format!( + "wrong number of arguments for QUIT command" + ))); } Cmd::Quit } @@ -478,27 +615,41 @@ impl Cmd { } } "command" => { - let args = if cmd.len() > 1 { cmd[1..].to_vec() } else { vec![] }; + let args = if cmd.len() > 1 { + cmd[1..].to_vec() + } else { + vec![] + }; Cmd::Command(args) } "lpush" => { if cmd.len() < 3 { - return Err(DBError(format!("wrong number of arguments for LPUSH command"))); + return Err(DBError(format!( + "wrong number of arguments for LPUSH command" + ))); } Cmd::LPush(cmd[1].clone(), cmd[2..].to_vec()) } "rpush" => { if cmd.len() < 3 { - return Err(DBError(format!("wrong number of arguments for RPUSH command"))); + return Err(DBError(format!( + "wrong number of arguments for RPUSH command" + ))); } Cmd::RPush(cmd[1].clone(), cmd[2..].to_vec()) } "lpop" => { if cmd.len() < 2 || cmd.len() > 3 { - return Err(DBError(format!("wrong number of arguments for LPOP command"))); + return Err(DBError(format!( + "wrong number of arguments for LPOP command" + ))); } let count = if cmd.len() == 3 { - Some(cmd[2].parse::().map_err(|_| 
DBError("ERR value is not an integer or out of range".to_string()))?) + Some(cmd[2].parse::().map_err(|_| { + DBError( + "ERR value is not an integer or out of range".to_string(), + ) + })?) } else { None }; @@ -506,10 +657,16 @@ impl Cmd { } "rpop" => { if cmd.len() < 2 || cmd.len() > 3 { - return Err(DBError(format!("wrong number of arguments for RPOP command"))); + return Err(DBError(format!( + "wrong number of arguments for RPOP command" + ))); } let count = if cmd.len() == 3 { - Some(cmd[2].parse::().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?) + Some(cmd[2].parse::().map_err(|_| { + DBError( + "ERR value is not an integer or out of range".to_string(), + ) + })?) } else { None }; @@ -517,115 +674,398 @@ impl Cmd { } "blpop" => { if cmd.len() < 3 { - return Err(DBError(format!("wrong number of arguments for BLPOP command"))); + return Err(DBError(format!( + "wrong number of arguments for BLPOP command" + ))); } // keys are all but the last argument - let keys = cmd[1..cmd.len()-1].to_vec(); - let timeout_f = cmd[cmd.len()-1] + let keys = cmd[1..cmd.len() - 1].to_vec(); + let timeout_f = cmd[cmd.len() - 1] .parse::() .map_err(|_| DBError("ERR timeout is not a number".to_string()))?; Cmd::BLPop(keys, timeout_f) } "brpop" => { if cmd.len() < 3 { - return Err(DBError(format!("wrong number of arguments for BRPOP command"))); + return Err(DBError(format!( + "wrong number of arguments for BRPOP command" + ))); } // keys are all but the last argument - let keys = cmd[1..cmd.len()-1].to_vec(); - let timeout_f = cmd[cmd.len()-1] + let keys = cmd[1..cmd.len() - 1].to_vec(); + let timeout_f = cmd[cmd.len() - 1] .parse::() .map_err(|_| DBError("ERR timeout is not a number".to_string()))?; Cmd::BRPop(keys, timeout_f) } "llen" => { if cmd.len() != 2 { - return Err(DBError(format!("wrong number of arguments for LLEN command"))); + return Err(DBError(format!( + "wrong number of arguments for LLEN command" + ))); } Cmd::LLen(cmd[1].clone()) } "lrem" => { if cmd.len() != 4 { - return Err(DBError(format!("wrong number of arguments for LREM command"))); + return Err(DBError(format!( + "wrong number of arguments for LREM command" + ))); } - let count = cmd[2].parse::().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?; + let count = cmd[2].parse::().map_err(|_| { + DBError("ERR value is not an integer or out of range".to_string()) + })?; Cmd::LRem(cmd[1].clone(), count, cmd[3].clone()) } "ltrim" => { if cmd.len() != 4 { - return Err(DBError(format!("wrong number of arguments for LTRIM command"))); + return Err(DBError(format!( + "wrong number of arguments for LTRIM command" + ))); } - let start = cmd[2].parse::().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?; - let stop = cmd[3].parse::().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?; + let start = cmd[2].parse::().map_err(|_| { + DBError("ERR value is not an integer or out of range".to_string()) + })?; + let stop = cmd[3].parse::().map_err(|_| { + DBError("ERR value is not an integer or out of range".to_string()) + })?; Cmd::LTrim(cmd[1].clone(), start, stop) } "lindex" => { if cmd.len() != 3 { - return Err(DBError(format!("wrong number of arguments for LINDEX command"))); + return Err(DBError(format!( + "wrong number of arguments for LINDEX command" + ))); } - let index = cmd[2].parse::().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?; + let index = cmd[2].parse::().map_err(|_| { + 
DBError("ERR value is not an integer or out of range".to_string()) + })?; Cmd::LIndex(cmd[1].clone(), index) } "lrange" => { if cmd.len() != 4 { - return Err(DBError(format!("wrong number of arguments for LRANGE command"))); + return Err(DBError(format!( + "wrong number of arguments for LRANGE command" + ))); } - let start = cmd[2].parse::().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?; - let stop = cmd[3].parse::().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?; + let start = cmd[2].parse::().map_err(|_| { + DBError("ERR value is not an integer or out of range".to_string()) + })?; + let stop = cmd[3].parse::().map_err(|_| { + DBError("ERR value is not an integer or out of range".to_string()) + })?; Cmd::LRange(cmd[1].clone(), start, stop) } "flushdb" => { if cmd.len() != 1 { - return Err(DBError("wrong number of arguments for FLUSHDB command".to_string())); + return Err(DBError( + "wrong number of arguments for FLUSHDB command".to_string(), + )); } Cmd::FlushDb } "age" => { if cmd.len() < 2 { - return Err(DBError("wrong number of arguments for AGE".to_string())); + return Err(DBError( + "wrong number of arguments for AGE".to_string(), + )); } match cmd[1].to_lowercase().as_str() { // stateless - "genenc" => { if cmd.len() != 2 { return Err(DBError("AGE GENENC takes no args".to_string())); } - Cmd::AgeGenEnc } - "gensign" => { if cmd.len() != 2 { return Err(DBError("AGE GENSIGN takes no args".to_string())); } - Cmd::AgeGenSign } - "encrypt" => { if cmd.len() != 4 { return Err(DBError("AGE ENCRYPT ".to_string())); } - Cmd::AgeEncrypt(cmd[2].clone(), cmd[3].clone()) } - "decrypt" => { if cmd.len() != 4 { return Err(DBError("AGE DECRYPT ".to_string())); } - Cmd::AgeDecrypt(cmd[2].clone(), cmd[3].clone()) } - "sign" => { if cmd.len() != 4 { return Err(DBError("AGE SIGN ".to_string())); } - Cmd::AgeSign(cmd[2].clone(), cmd[3].clone()) } - "verify" => { if cmd.len() != 5 { return Err(DBError("AGE VERIFY ".to_string())); } - Cmd::AgeVerify(cmd[2].clone(), cmd[3].clone(), cmd[4].clone()) } + "genenc" => { + if cmd.len() != 2 { + return Err(DBError( + "AGE GENENC takes no args".to_string(), + )); + } + Cmd::AgeGenEnc + } + "gensign" => { + if cmd.len() != 2 { + return Err(DBError( + "AGE GENSIGN takes no args".to_string(), + )); + } + Cmd::AgeGenSign + } + "encrypt" => { + if cmd.len() != 4 { + return Err(DBError( + "AGE ENCRYPT ".to_string(), + )); + } + Cmd::AgeEncrypt(cmd[2].clone(), cmd[3].clone()) + } + "decrypt" => { + if cmd.len() != 4 { + return Err(DBError( + "AGE DECRYPT ".to_string(), + )); + } + Cmd::AgeDecrypt(cmd[2].clone(), cmd[3].clone()) + } + "sign" => { + if cmd.len() != 4 { + return Err(DBError( + "AGE SIGN ".to_string(), + )); + } + Cmd::AgeSign(cmd[2].clone(), cmd[3].clone()) + } + "verify" => { + if cmd.len() != 5 { + return Err(DBError( + "AGE VERIFY " + .to_string(), + )); + } + Cmd::AgeVerify(cmd[2].clone(), cmd[3].clone(), cmd[4].clone()) + } // persistent names - "keygen" => { if cmd.len() != 3 { return Err(DBError("AGE KEYGEN ".to_string())); } - Cmd::AgeKeygen(cmd[2].clone()) } - "signkeygen" => { if cmd.len() != 3 { return Err(DBError("AGE SIGNKEYGEN ".to_string())); } - Cmd::AgeSignKeygen(cmd[2].clone()) } - "encryptname" => { if cmd.len() != 4 { return Err(DBError("AGE ENCRYPTNAME ".to_string())); } - Cmd::AgeEncryptName(cmd[2].clone(), cmd[3].clone()) } - "decryptname" => { if cmd.len() != 4 { return Err(DBError("AGE DECRYPTNAME ".to_string())); } - Cmd::AgeDecryptName(cmd[2].clone(), 
cmd[3].clone()) } - "signname" => { if cmd.len() != 4 { return Err(DBError("AGE SIGNNAME ".to_string())); } - Cmd::AgeSignName(cmd[2].clone(), cmd[3].clone()) } - "verifyname" => { if cmd.len() != 5 { return Err(DBError("AGE VERIFYNAME ".to_string())); } - Cmd::AgeVerifyName(cmd[2].clone(), cmd[3].clone(), cmd[4].clone()) } - "list" => { if cmd.len() != 2 { return Err(DBError("AGE LIST".to_string())); } - Cmd::AgeList } - _ => return Err(DBError(format!("unsupported AGE subcommand {:?}", cmd))), + "keygen" => { + if cmd.len() != 3 { + return Err(DBError("AGE KEYGEN ".to_string())); + } + Cmd::AgeKeygen(cmd[2].clone()) + } + "signkeygen" => { + if cmd.len() != 3 { + return Err(DBError("AGE SIGNKEYGEN ".to_string())); + } + Cmd::AgeSignKeygen(cmd[2].clone()) + } + "encryptname" => { + if cmd.len() != 4 { + return Err(DBError( + "AGE ENCRYPTNAME ".to_string(), + )); + } + Cmd::AgeEncryptName(cmd[2].clone(), cmd[3].clone()) + } + "decryptname" => { + if cmd.len() != 4 { + return Err(DBError( + "AGE DECRYPTNAME ".to_string(), + )); + } + Cmd::AgeDecryptName(cmd[2].clone(), cmd[3].clone()) + } + "signname" => { + if cmd.len() != 4 { + return Err(DBError( + "AGE SIGNNAME ".to_string(), + )); + } + Cmd::AgeSignName(cmd[2].clone(), cmd[3].clone()) + } + "verifyname" => { + if cmd.len() != 5 { + return Err(DBError( + "AGE VERIFYNAME " + .to_string(), + )); + } + Cmd::AgeVerifyName( + cmd[2].clone(), + cmd[3].clone(), + cmd[4].clone(), + ) + } + "list" => { + if cmd.len() != 2 { + return Err(DBError("AGE LIST".to_string())); + } + Cmd::AgeList + } + _ => { + return Err(DBError(format!( + "unsupported AGE subcommand {:?}", + cmd + ))) + } } } + "ft.create" => { + if cmd.len() < 4 || cmd[2].to_uppercase() != "SCHEMA" { + return Err(DBError("ERR FT.CREATE requires: indexname SCHEMA field1 type1 [options] ...".to_string())); + } + + let index_name = cmd[1].clone(); + let mut schema = Vec::new(); + let mut i = 3; + + while i < cmd.len() { + if i + 1 >= cmd.len() { + return Err(DBError( + "ERR incomplete field definition".to_string(), + )); + } + + let field_name = cmd[i].clone(); + let field_type = cmd[i + 1].to_uppercase(); + let mut options = Vec::new(); + i += 2; + + // Parse field options until we hit another field name or end + while i < cmd.len() + && [ + "WEIGHT", + "SORTABLE", + "NOINDEX", + "SEPARATOR", + "CASESENSITIVE", + ] + .contains(&cmd[i].to_uppercase().as_str()) + { + options.push(cmd[i].to_uppercase()); + i += 1; + + // If this option takes a value, consume it too + if i > 0 + && ["SEPARATOR", "WEIGHT"] + .contains(&cmd[i - 1].to_uppercase().as_str()) + && i < cmd.len() + { + options.push(cmd[i].clone()); + i += 1; + } + } + + schema.push((field_name, field_type, options)); + } + + Cmd::FtCreate { index_name, schema } + } + "ft.add" => { + if cmd.len() < 5 { + return Err(DBError( + "ERR FT.ADD requires: index_name doc_id score field value ..." 
+ .to_string(), + )); + } + + let index_name = cmd[1].clone(); + let doc_id = cmd[2].clone(); + let score = cmd[3] + .parse::() + .map_err(|_| DBError("ERR score must be a number".to_string()))?; + + let mut fields = HashMap::new(); + let mut i = 4; + + while i + 1 < cmd.len() { + fields.insert(cmd[i].clone(), cmd[i + 1].clone()); + i += 2; + } + + Cmd::FtAdd { + index_name, + doc_id, + score, + fields, + } + } + "ft.search" => { + if cmd.len() < 3 { + return Err(DBError( + "ERR FT.SEARCH requires: index_name query [options]" + .to_string(), + )); + } + + let index_name = cmd[1].clone(); + let query = cmd[2].clone(); + + let mut filters = Vec::new(); + let mut limit = None; + let mut offset = None; + let mut return_fields = None; + + let mut i = 3; + while i < cmd.len() { + match cmd[i].to_uppercase().as_str() { + "FILTER" => { + if i + 2 >= cmd.len() { + return Err(DBError( + "ERR FILTER requires field and value".to_string(), + )); + } + filters.push((cmd[i + 1].clone(), cmd[i + 2].clone())); + i += 3; + } + "LIMIT" => { + if i + 2 >= cmd.len() { + return Err(DBError( + "ERR LIMIT requires offset and num".to_string(), + )); + } + offset = Some(cmd[i + 1].parse().unwrap_or(0)); + limit = Some(cmd[i + 2].parse().unwrap_or(10)); + i += 3; + } + "RETURN" => { + if i + 1 >= cmd.len() { + return Err(DBError( + "ERR RETURN requires field count".to_string(), + )); + } + let count: usize = cmd[i + 1].parse().unwrap_or(0); + i += 2; + + let mut fields = Vec::new(); + for _ in 0..count { + if i < cmd.len() { + fields.push(cmd[i].clone()); + i += 1; + } + } + return_fields = Some(fields); + } + _ => i += 1, + } + } + + Cmd::FtSearch { + index_name, + query, + filters, + limit, + offset, + return_fields, + } + } + "ft.del" => { + if cmd.len() != 3 { + return Err(DBError( + "ERR FT.DEL requires: index_name doc_id".to_string(), + )); + } + Cmd::FtDel(cmd[1].clone(), cmd[2].clone()) + } + "ft.info" => { + if cmd.len() != 2 { + return Err(DBError( + "ERR FT.INFO requires: index_name".to_string(), + )); + } + Cmd::FtInfo(cmd[1].clone()) + } + "ft.drop" => { + if cmd.len() != 2 { + return Err(DBError( + "ERR FT.DROP requires: index_name".to_string(), + )); + } + Cmd::FtDrop(cmd[1].clone()) + } _ => Cmd::Unknow(cmd[0].clone()), }, protocol, - remaining + remaining, )) } - _ => Err(DBError(format!( - "fail to parse as cmd for {:?}", - protocol - ))), + _ => Err(DBError(format!("fail to parse as cmd for {:?}", protocol))), } } @@ -649,13 +1089,15 @@ impl Cmd { Cmd::Set(k, v) => set_cmd(server, &k, &v).await, Cmd::SetPx(k, v, x) => set_px_cmd(server, &k, &v, &x).await, Cmd::SetEx(k, v, x) => set_ex_cmd(server, &k, &v, &x).await, - Cmd::SetOpts(k, v, ex_ms, nx, xx, getflag) => set_with_opts_cmd(server, &k, &v, ex_ms, nx, xx, getflag).await, + Cmd::SetOpts(k, v, ex_ms, nx, xx, getflag) => { + set_with_opts_cmd(server, &k, &v, ex_ms, nx, xx, getflag).await + } Cmd::MGet(keys) => mget_cmd(server, &keys).await, Cmd::MSet(pairs) => mset_cmd(server, &pairs).await, Cmd::Del(k) => del_cmd(server, &k).await, Cmd::DelMulti(keys) => del_multi_cmd(server, &keys).await, Cmd::ConfigGet(name) => config_get_cmd(&name, server), - Cmd::Keys => keys_cmd(server).await, + Cmd::Keys(pattern) => keys_cmd(server, &pattern).await, Cmd::DbSize => dbsize_cmd(server).await, Cmd::Info(section) => info_cmd(server, §ion).await, Cmd::Type(k) => type_cmd(server, &k).await, @@ -685,9 +1127,15 @@ impl Cmd { Cmd::HMGet(key, fields) => hmget_cmd(server, &key, &fields).await, Cmd::HSetNx(key, field, value) => hsetnx_cmd(server, &key, &field, 
&value).await, Cmd::HIncrBy(key, field, delta) => hincrby_cmd(server, &key, &field, delta).await, - Cmd::HIncrByFloat(key, field, delta) => hincrbyfloat_cmd(server, &key, &field, delta).await, - Cmd::HScan(key, cursor, pattern, count) => hscan_cmd(server, &key, &cursor, pattern.as_deref(), &count).await, - Cmd::Scan(cursor, pattern, count) => scan_cmd(server, &cursor, pattern.as_deref(), &count).await, + Cmd::HIncrByFloat(key, field, delta) => { + hincrbyfloat_cmd(server, &key, &field, delta).await + } + Cmd::HScan(key, cursor, pattern, count) => { + hscan_cmd(server, &key, &cursor, pattern.as_deref(), &count).await + } + Cmd::Scan(cursor, pattern, count) => { + scan_cmd(server, &cursor, pattern.as_deref(), &count).await + } Cmd::Ttl(key) => ttl_cmd(server, &key).await, Cmd::Expire(key, secs) => expire_cmd(server, &key, secs).await, Cmd::PExpire(key, ms) => pexpire_cmd(server, &key, ms).await, @@ -717,31 +1165,101 @@ impl Cmd { // AGE (rage): stateless Cmd::AgeGenEnc => Ok(crate::age::cmd_age_genenc().await), Cmd::AgeGenSign => Ok(crate::age::cmd_age_gensign().await), - Cmd::AgeEncrypt(recipient, message) => Ok(crate::age::cmd_age_encrypt(&recipient, &message).await), - Cmd::AgeDecrypt(identity, ct_b64) => Ok(crate::age::cmd_age_decrypt(&identity, &ct_b64).await), + Cmd::AgeEncrypt(recipient, message) => { + Ok(crate::age::cmd_age_encrypt(&recipient, &message).await) + } + Cmd::AgeDecrypt(identity, ct_b64) => { + Ok(crate::age::cmd_age_decrypt(&identity, &ct_b64).await) + } Cmd::AgeSign(secret, message) => Ok(crate::age::cmd_age_sign(&secret, &message).await), - Cmd::AgeVerify(vpub, msg, sig_b64) => Ok(crate::age::cmd_age_verify(&vpub, &msg, &sig_b64).await), + Cmd::AgeVerify(vpub, msg, sig_b64) => { + Ok(crate::age::cmd_age_verify(&vpub, &msg, &sig_b64).await) + } // AGE (rage): persistent named keys Cmd::AgeKeygen(name) => Ok(crate::age::cmd_age_keygen(server, &name).await), Cmd::AgeSignKeygen(name) => Ok(crate::age::cmd_age_signkeygen(server, &name).await), - Cmd::AgeEncryptName(name, message) => Ok(crate::age::cmd_age_encrypt_name(server, &name, &message).await), - Cmd::AgeDecryptName(name, ct_b64) => Ok(crate::age::cmd_age_decrypt_name(server, &name, &ct_b64).await), - Cmd::AgeSignName(name, message) => Ok(crate::age::cmd_age_sign_name(server, &name, &message).await), - Cmd::AgeVerifyName(name, message, sig_b64) => Ok(crate::age::cmd_age_verify_name(server, &name, &message, &sig_b64).await), + Cmd::AgeEncryptName(name, message) => { + Ok(crate::age::cmd_age_encrypt_name(server, &name, &message).await) + } + Cmd::AgeDecryptName(name, ct_b64) => { + Ok(crate::age::cmd_age_decrypt_name(server, &name, &ct_b64).await) + } + Cmd::AgeSignName(name, message) => { + Ok(crate::age::cmd_age_sign_name(server, &name, &message).await) + } + Cmd::AgeVerifyName(name, message, sig_b64) => { + Ok(crate::age::cmd_age_verify_name(server, &name, &message, &sig_b64).await) + } Cmd::AgeList => Ok(crate::age::cmd_age_list(server).await), + + // Full-text search commands + Cmd::FtCreate { index_name, schema } => { + search_cmd::ft_create_cmd(server, index_name, schema).await + } + Cmd::FtAdd { + index_name, + doc_id, + score, + fields, + } => search_cmd::ft_add_cmd(server, index_name, doc_id, score, fields).await, + Cmd::FtSearch { + index_name, + query, + filters, + limit, + offset, + return_fields, + } => { + search_cmd::ft_search_cmd( + server, + index_name, + query, + filters, + limit, + offset, + return_fields, + ) + .await + } + Cmd::FtDel(index_name, doc_id) => { + search_cmd::ft_del_cmd(server, 
index_name, doc_id).await + } + Cmd::FtInfo(index_name) => search_cmd::ft_info_cmd(server, index_name).await, + Cmd::FtDrop(index_name) => search_cmd::ft_drop_cmd(server, index_name).await, + Cmd::FtAlter { .. } => { + // Not implemented yet + Ok(Protocol::err("FT.ALTER not implemented yet")) + } + Cmd::FtAggregate { .. } => { + // Not implemented yet + Ok(Protocol::err("FT.AGGREGATE not implemented yet")) + } Cmd::Unknow(s) => Ok(Protocol::err(&format!("ERR unknown command `{}`", s))), } } - + pub fn to_protocol(self) -> Protocol { match self { - Cmd::Select(db) => Protocol::Array(vec![Protocol::BulkString("select".to_string()), Protocol::BulkString(db.to_string())]), + Cmd::Select(db) => Protocol::Array(vec![ + Protocol::BulkString("select".to_string()), + Protocol::BulkString(db.to_string()), + ]), Cmd::Ping => Protocol::Array(vec![Protocol::BulkString("ping".to_string())]), - Cmd::Echo(s) => Protocol::Array(vec![Protocol::BulkString("echo".to_string()), Protocol::BulkString(s)]), - Cmd::Get(k) => Protocol::Array(vec![Protocol::BulkString("get".to_string()), Protocol::BulkString(k)]), - Cmd::Set(k, v) => Protocol::Array(vec![Protocol::BulkString("set".to_string()), Protocol::BulkString(k), Protocol::BulkString(v)]), - _ => Protocol::SimpleString("...".to_string()) + Cmd::Echo(s) => Protocol::Array(vec![ + Protocol::BulkString("echo".to_string()), + Protocol::BulkString(s), + ]), + Cmd::Get(k) => Protocol::Array(vec![ + Protocol::BulkString("get".to_string()), + Protocol::BulkString(k), + ]), + Cmd::Set(k, v) => Protocol::Array(vec![ + Protocol::BulkString("set".to_string()), + Protocol::BulkString(k), + Protocol::BulkString(v), + ]), + _ => Protocol::SimpleString("...".to_string()), } } } @@ -770,9 +1288,16 @@ async fn lindex_cmd(server: &Server, key: &str, index: i64) -> Result Result { +async fn lrange_cmd( + server: &Server, + key: &str, + start: i64, + stop: i64, +) -> Result { match server.current_storage()?.lrange(key, start, stop) { - Ok(elements) => Ok(Protocol::Array(elements.into_iter().map(Protocol::BulkString).collect())), + Ok(elements) => Ok(Protocol::Array( + elements.into_iter().map(Protocol::BulkString).collect(), + )), Err(e) => Ok(Protocol::err(&e.0)), } } @@ -784,7 +1309,12 @@ async fn ltrim_cmd(server: &Server, key: &str, start: i64, stop: i64) -> Result< } } -async fn lrem_cmd(server: &Server, key: &str, count: i64, element: &str) -> Result { +async fn lrem_cmd( + server: &Server, + key: &str, + count: i64, + element: &str, +) -> Result { match server.current_storage()?.lrem(key, count, element) { Ok(removed_count) => Ok(Protocol::SimpleString(removed_count.to_string())), Err(e) => Ok(Protocol::err(&e.0)), @@ -809,11 +1339,13 @@ async fn lpop_cmd(server: &Server, key: &str, count: &Option) -> Result Ok(Protocol::err(&e.0)), } } @@ -829,17 +1361,23 @@ async fn rpop_cmd(server: &Server, key: &str, count: &Option) -> Result Ok(Protocol::err(&e.0)), } } // BLPOP implementation -async fn blpop_cmd(server: &Server, keys: &[String], timeout_secs: f64) -> Result { +async fn blpop_cmd( + server: &Server, + keys: &[String], + timeout_secs: f64, +) -> Result { // Immediate, non-blocking attempt in key order for k in keys { let elems = server.current_storage()?.lpop(k, 1)?; @@ -860,10 +1398,13 @@ async fn blpop_cmd(server: &Server, keys: &[String], timeout_secs: f64) -> Resul let db_index = server.selected_db; let mut ids: Vec = Vec::with_capacity(keys.len()); let mut names: Vec = Vec::with_capacity(keys.len()); - let mut rxs: Vec> = Vec::with_capacity(keys.len()); + let mut 
rxs: Vec> = + Vec::with_capacity(keys.len()); for k in keys { - let (id, rx) = server.register_waiter(db_index, k, crate::server::PopSide::Left).await; + let (id, rx) = server + .register_waiter(db_index, k, crate::server::PopSide::Left) + .await; ids.push(id); names.push(k.clone()); rxs.push(rx); @@ -921,7 +1462,11 @@ async fn blpop_cmd(server: &Server, keys: &[String], timeout_secs: f64) -> Resul } // BRPOP implementation (mirror of BLPOP, popping from the right) -async fn brpop_cmd(server: &Server, keys: &[String], timeout_secs: f64) -> Result { +async fn brpop_cmd( + server: &Server, + keys: &[String], + timeout_secs: f64, +) -> Result { // Immediate, non-blocking attempt in key order using RPOP for k in keys { let elems = server.current_storage()?.rpop(k, 1)?; @@ -942,10 +1487,13 @@ async fn brpop_cmd(server: &Server, keys: &[String], timeout_secs: f64) -> Resul let db_index = server.selected_db; let mut ids: Vec = Vec::with_capacity(keys.len()); let mut names: Vec = Vec::with_capacity(keys.len()); - let mut rxs: Vec> = Vec::with_capacity(keys.len()); + let mut rxs: Vec> = + Vec::with_capacity(keys.len()); for k in keys { - let (id, rx) = server.register_waiter(db_index, k, crate::server::PopSide::Right).await; + let (id, rx) = server + .register_waiter(db_index, k, crate::server::PopSide::Right) + .await; ids.push(id); names.push(k.clone()); rxs.push(rx); @@ -1044,17 +1592,15 @@ async fn exec_cmd(server: &mut Server) -> Result { async fn incr_cmd(server: &Server, key: &String) -> Result { let storage = server.current_storage()?; let current_value = storage.get(key)?; - + let new_value = match current_value { - Some(v) => { - match v.parse::() { - Ok(num) => num + 1, - Err(_) => return Ok(Protocol::err("ERR value is not an integer or out of range")), - } - } + Some(v) => match v.parse::() { + Ok(num) => num + 1, + Err(_) => return Ok(Protocol::err("ERR value is not an integer or out of range")), + }, None => 1, }; - + storage.set(key.clone(), new_value.to_string())?; Ok(Protocol::SimpleString(new_value.to_string())) } @@ -1078,8 +1624,8 @@ fn config_get_cmd(name: &String, server: &Server) -> Result { } } -async fn keys_cmd(server: &Server) -> Result { - let keys = server.current_storage()?.keys("*")?; +async fn keys_cmd(server: &Server, pattern: &str) -> Result { + let keys = server.current_storage()?.keys(pattern)?; Ok(Protocol::Array( keys.into_iter().map(Protocol::BulkString).collect(), )) @@ -1094,21 +1640,34 @@ async fn dbsize_cmd(server: &Server) -> Result { async fn info_cmd(server: &Server, section: &Option) -> Result { let storage_info = server.current_storage()?.info()?; - let mut info_map: std::collections::HashMap = storage_info.into_iter().collect(); + let mut info_map: std::collections::HashMap = + storage_info.into_iter().collect(); info_map.insert("redis_version".to_string(), "7.0.0".to_string()); info_map.insert("selected_db".to_string(), server.selected_db.to_string()); - info_map.insert("backend".to_string(), format!("{:?}", server.option.backend)); - + info_map.insert( + "backend".to_string(), + format!("{:?}", server.option.backend), + ); let mut info_string = String::new(); info_string.push_str("# Server\n"); - info_string.push_str(&format!("redis_version:{}\n", info_map.get("redis_version").unwrap())); + info_string.push_str(&format!( + "redis_version:{}\n", + info_map.get("redis_version").unwrap() + )); info_string.push_str(&format!("backend:{}\n", info_map.get("backend").unwrap())); - info_string.push_str(&format!("encrypted:{}\n", 
info_map.get("is_encrypted").unwrap())); - + info_string.push_str(&format!( + "encrypted:{}\n", + info_map.get("is_encrypted").unwrap() + )); + info_string.push_str("# Keyspace\n"); - info_string.push_str(&format!("db{}:keys={},expires=0,avg_ttl=0\n", info_map.get("selected_db").unwrap(), info_map.get("db_size").unwrap())); + info_string.push_str(&format!( + "db{}:keys={},expires=0,avg_ttl=0\n", + info_map.get("selected_db").unwrap(), + info_map.get("db_size").unwrap() + )); match section { Some(s) => { @@ -1138,28 +1697,24 @@ async fn del_cmd(server: &Server, k: &str) -> Result { Ok(Protocol::SimpleString("1".to_string())) } -async fn set_ex_cmd( - server: &Server, - k: &str, - v: &str, - x: &u128, -) -> Result { - server.current_storage()?.setx(k.to_string(), v.to_string(), *x * 1000)?; +async fn set_ex_cmd(server: &Server, k: &str, v: &str, x: &u128) -> Result { + server + .current_storage()? + .setx(k.to_string(), v.to_string(), *x * 1000)?; Ok(Protocol::SimpleString("OK".to_string())) } -async fn set_px_cmd( - server: &Server, - k: &str, - v: &str, - x: &u128, -) -> Result { - server.current_storage()?.setx(k.to_string(), v.to_string(), *x)?; +async fn set_px_cmd(server: &Server, k: &str, v: &str, x: &u128) -> Result { + server + .current_storage()? + .setx(k.to_string(), v.to_string(), *x)?; Ok(Protocol::SimpleString("OK".to_string())) } async fn set_cmd(server: &Server, k: &str, v: &str) -> Result { - server.current_storage()?.set(k.to_string(), v.to_string())?; + server + .current_storage()? + .set(k.to_string(), v.to_string())?; Ok(Protocol::SimpleString("OK".to_string())) } @@ -1188,11 +1743,7 @@ async fn set_with_opts_cmd( } // Fetch old value if needed for GET - let old_val = if get_old { - storage.get(key)? - } else { - None - }; + let old_val = if get_old { storage.get(key)? 
} else { None }; if should_set { if let Some(ms) = ex_ms { @@ -1272,7 +1823,11 @@ async fn get_cmd(server: &Server, k: &str) -> Result { } // Hash command implementations -async fn hset_cmd(server: &Server, key: &str, pairs: &[(String, String)]) -> Result { +async fn hset_cmd( + server: &Server, + key: &str, + pairs: &[(String, String)], +) -> Result { let new_fields = server.current_storage()?.hset(key, pairs.to_vec())?; Ok(Protocol::SimpleString(new_fields.to_string())) } @@ -1308,7 +1863,9 @@ async fn hdel_cmd(server: &Server, key: &str, fields: &[String]) -> Result Result { match server.current_storage()?.hexists(key, field) { - Ok(exists) => Ok(Protocol::SimpleString(if exists { "1" } else { "0" }.to_string())), + Ok(exists) => Ok(Protocol::SimpleString( + if exists { "1" } else { "0" }.to_string(), + )), Err(e) => Ok(Protocol::err(&e.0)), } } @@ -1351,31 +1908,54 @@ async fn hmget_cmd(server: &Server, key: &str, fields: &[String]) -> Result Result { +async fn hsetnx_cmd( + server: &Server, + key: &str, + field: &str, + value: &str, +) -> Result { match server.current_storage()?.hsetnx(key, field, value) { - Ok(was_set) => Ok(Protocol::SimpleString(if was_set { "1" } else { "0" }.to_string())), + Ok(was_set) => Ok(Protocol::SimpleString( + if was_set { "1" } else { "0" }.to_string(), + )), Err(e) => Ok(Protocol::err(&e.0)), } } -async fn hincrby_cmd(server: &Server, key: &str, field: &str, delta: i64) -> Result { +async fn hincrby_cmd( + server: &Server, + key: &str, + field: &str, + delta: i64, +) -> Result { let storage = server.current_storage()?; let current = storage.hget(key, field)?; let base: i64 = match current { - Some(v) => v.parse::().map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?, + Some(v) => v + .parse::() + .map_err(|_| DBError("ERR value is not an integer or out of range".to_string()))?, None => 0, }; - let new_val = base.checked_add(delta).ok_or_else(|| DBError("ERR increment or decrement would overflow".to_string()))?; + let new_val = base + .checked_add(delta) + .ok_or_else(|| DBError("ERR increment or decrement would overflow".to_string()))?; // Update the field storage.hset(key, vec![(field.to_string(), new_val.to_string())])?; Ok(Protocol::SimpleString(new_val.to_string())) } -async fn hincrbyfloat_cmd(server: &Server, key: &str, field: &str, delta: f64) -> Result { +async fn hincrbyfloat_cmd( + server: &Server, + key: &str, + field: &str, + delta: f64, +) -> Result { let storage = server.current_storage()?; let current = storage.hget(key, field)?; let base: f64 = match current { - Some(v) => v.parse::().map_err(|_| DBError("ERR value is not a valid float".to_string()))?, + Some(v) => v + .parse::() + .map_err(|_| DBError("ERR value is not a valid float".to_string()))?, None => 0.0, }; let new_val = base + delta; @@ -1388,14 +1968,17 @@ async fn scan_cmd( server: &Server, cursor: &u64, pattern: Option<&str>, - count: &Option + count: &Option, ) -> Result { match server.current_storage()?.scan(*cursor, pattern, *count) { Ok((next_cursor, key_value_pairs)) => { let mut result = Vec::new(); result.push(Protocol::BulkString(next_cursor.to_string())); // For SCAN, we only return the keys, not the values - let keys: Vec = key_value_pairs.into_iter().map(|(key, _)| Protocol::BulkString(key)).collect(); + let keys: Vec = key_value_pairs + .into_iter() + .map(|(key, _)| Protocol::BulkString(key)) + .collect(); result.push(Protocol::Array(keys)); Ok(Protocol::Array(result)) } @@ -1408,9 +1991,12 @@ async fn hscan_cmd( key: &str, cursor: 
&u64, pattern: Option<&str>, - count: &Option + count: &Option, ) -> Result { - match server.current_storage()?.hscan(key, *cursor, pattern, *count) { + match server + .current_storage()? + .hscan(key, *cursor, pattern, *count) + { Ok((next_cursor, field_value_pairs)) => { let mut result = Vec::new(); result.push(Protocol::BulkString(next_cursor.to_string())); @@ -1436,7 +2022,9 @@ async fn ttl_cmd(server: &Server, key: &str) -> Result { async fn exists_cmd(server: &Server, key: &str) -> Result { match server.current_storage()?.exists(key) { - Ok(exists) => Ok(Protocol::SimpleString(if exists { "1" } else { "0" }.to_string())), + Ok(exists) => Ok(Protocol::SimpleString( + if exists { "1" } else { "0" }.to_string(), + )), Err(e) => Ok(Protocol::err(&e.0)), } } @@ -1447,7 +2035,9 @@ async fn expire_cmd(server: &Server, key: &str, secs: i64) -> Result Ok(Protocol::SimpleString(if applied { "1" } else { "0" }.to_string())), + Ok(applied) => Ok(Protocol::SimpleString( + if applied { "1" } else { "0" }.to_string(), + )), Err(e) => Ok(Protocol::err(&e.0)), } } @@ -1458,7 +2048,9 @@ async fn pexpire_cmd(server: &Server, key: &str, ms: i64) -> Result Ok(Protocol::SimpleString(if applied { "1" } else { "0" }.to_string())), + Ok(applied) => Ok(Protocol::SimpleString( + if applied { "1" } else { "0" }.to_string(), + )), Err(e) => Ok(Protocol::err(&e.0)), } } @@ -1466,14 +2058,18 @@ async fn pexpire_cmd(server: &Server, key: &str, ms: i64) -> Result 1 if timeout removed, 0 otherwise async fn persist_cmd(server: &Server, key: &str) -> Result { match server.current_storage()?.persist(key) { - Ok(removed) => Ok(Protocol::SimpleString(if removed { "1" } else { "0" }.to_string())), + Ok(removed) => Ok(Protocol::SimpleString( + if removed { "1" } else { "0" }.to_string(), + )), Err(e) => Ok(Protocol::err(&e.0)), } } // EXPIREAT key timestamp-seconds -> 1 if timeout set, 0 otherwise async fn expireat_cmd(server: &Server, key: &str, ts_secs: i64) -> Result { match server.current_storage()?.expire_at_seconds(key, ts_secs) { - Ok(applied) => Ok(Protocol::SimpleString(if applied { "1" } else { "0" }.to_string())), + Ok(applied) => Ok(Protocol::SimpleString( + if applied { "1" } else { "0" }.to_string(), + )), Err(e) => Ok(Protocol::err(&e.0)), } } @@ -1481,7 +2077,9 @@ async fn expireat_cmd(server: &Server, key: &str, ts_secs: i64) -> Result 1 if timeout set, 0 otherwise async fn pexpireat_cmd(server: &Server, key: &str, ts_ms: i64) -> Result { match server.current_storage()?.pexpire_at_millis(key, ts_ms) { - Ok(applied) => Ok(Protocol::SimpleString(if applied { "1" } else { "0" }.to_string())), + Ok(applied) => Ok(Protocol::SimpleString( + if applied { "1" } else { "0" }.to_string(), + )), Err(e) => Ok(Protocol::err(&e.0)), } } diff --git a/src/crypto.rs b/src/crypto.rs index 48a9f8c..db7a3ec 100644 --- a/src/crypto.rs +++ b/src/crypto.rs @@ -11,9 +11,9 @@ const TAG_LEN: usize = 16; #[derive(Debug)] pub enum CryptoError { - Format, // wrong length / header - Version(u8), // unknown version - Decrypt, // wrong key or corrupted data + Format, // wrong length / header + Version(u8), // unknown version + Decrypt, // wrong key or corrupted data } impl From for crate::error::DBError { @@ -71,4 +71,4 @@ impl CryptoFactory { let cipher = XChaCha20Poly1305::new(&self.key); cipher.decrypt(nonce, ct).map_err(|_| CryptoError::Decrypt) } -} \ No newline at end of file +} diff --git a/src/error.rs b/src/error.rs index 3037c70..25314ff 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,9 +1,8 @@ use std::num::ParseIntError; 
-use tokio::sync::mpsc; -use redb; use bincode; - +use redb; +use tokio::sync::mpsc; // todo: more error types #[derive(Debug)] diff --git a/src/lib.rs b/src/lib.rs index 31e69a8..b2e5bfd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,10 +1,12 @@ -pub mod age; // NEW +pub mod age; // NEW pub mod cmd; pub mod crypto; pub mod error; pub mod options; pub mod protocol; +pub mod search_cmd; // Add this pub mod server; pub mod storage; -pub mod storage_trait; // Add this -pub mod storage_sled; // Add this +pub mod storage_sled; // Add this +pub mod storage_trait; // Add this +pub mod tantivy_search; diff --git a/src/main.rs b/src/main.rs index dce569b..c2c9ed6 100644 --- a/src/main.rs +++ b/src/main.rs @@ -22,7 +22,6 @@ struct Args { #[arg(long)] debug: bool, - /// Master encryption key for encrypted databases #[arg(long)] encryption_key: Option, diff --git a/src/protocol.rs b/src/protocol.rs index 6025074..22587eb 100644 --- a/src/protocol.rs +++ b/src/protocol.rs @@ -81,18 +81,21 @@ impl Protocol { pub fn encode(&self) -> String { match self { Protocol::SimpleString(s) => format!("+{}\r\n", s), - Protocol::BulkString(s) => format!("${}\r\n{}\r\n", s.len(), s), - Protocol::Array(ss) => { + Protocol::BulkString(s) => format!("${}\r\n{}\r\n", s.len(), s), + Protocol::Array(ss) => { format!("*{}\r\n", ss.len()) + &ss.iter().map(|x| x.encode()).collect::() } - Protocol::Null => "$-1\r\n".to_string(), - Protocol::Error(s) => format!("-{}\r\n", s), // proper RESP error + Protocol::Null => "$-1\r\n".to_string(), + Protocol::Error(s) => format!("-{}\r\n", s), // proper RESP error } } fn parse_simple_string_sfx(protocol: &str) -> Result<(Self, &str), DBError> { match protocol.find("\r\n") { - Some(x) => Ok((Self::SimpleString(protocol[..x].to_string()), &protocol[x + 2..])), + Some(x) => Ok(( + Self::SimpleString(protocol[..x].to_string()), + &protocol[x + 2..], + )), _ => Err(DBError(format!( "[new simple string] unsupported protocol: {:?}", protocol diff --git a/src/search_cmd.rs b/src/search_cmd.rs new file mode 100644 index 0000000..c227cfe --- /dev/null +++ b/src/search_cmd.rs @@ -0,0 +1,273 @@ +use crate::{ + error::DBError, + protocol::Protocol, + server::Server, + tantivy_search::{ + FieldDef, Filter, FilterType, IndexConfig, NumericType, SearchOptions, TantivySearch, + }, +}; +use std::collections::HashMap; +use std::sync::Arc; + +pub async fn ft_create_cmd( + server: &Server, + index_name: String, + schema: Vec<(String, String, Vec)>, +) -> Result { + // Parse schema into field definitions + let mut field_definitions = Vec::new(); + + for (field_name, field_type, options) in schema { + let field_def = match field_type.to_uppercase().as_str() { + "TEXT" => { + let mut weight = 1.0; + let mut sortable = false; + let mut no_index = false; + + for opt in &options { + match opt.to_uppercase().as_str() { + "WEIGHT" => { + // Next option should be the weight value + if let Some(idx) = options.iter().position(|x| x == opt) { + if idx + 1 < options.len() { + weight = options[idx + 1].parse().unwrap_or(1.0); + } + } + } + "SORTABLE" => sortable = true, + "NOINDEX" => no_index = true, + _ => {} + } + } + + FieldDef::Text { + stored: true, + indexed: !no_index, + tokenized: true, + fast: sortable, + } + } + "NUMERIC" => { + let mut sortable = false; + + for opt in &options { + if opt.to_uppercase() == "SORTABLE" { + sortable = true; + } + } + + FieldDef::Numeric { + stored: true, + indexed: true, + fast: sortable, + precision: NumericType::F64, + } + } + "TAG" => { + let mut separator = ",".to_string(); + 
let mut case_sensitive = false; + + for i in 0..options.len() { + match options[i].to_uppercase().as_str() { + "SEPARATOR" => { + if i + 1 < options.len() { + separator = options[i + 1].clone(); + } + } + "CASESENSITIVE" => case_sensitive = true, + _ => {} + } + } + + FieldDef::Tag { + stored: true, + separator, + case_sensitive, + } + } + "GEO" => FieldDef::Geo { stored: true }, + _ => { + return Err(DBError(format!("Unknown field type: {}", field_type))); + } + }; + + field_definitions.push((field_name, field_def)); + } + + // Create the search index + let search_path = server.search_index_path(); + let config = IndexConfig::default(); + + println!( + "Creating search index '{}' at path: {:?}", + index_name, search_path + ); + println!("Field definitions: {:?}", field_definitions); + + let search_index = TantivySearch::new_with_schema( + search_path, + index_name.clone(), + field_definitions, + Some(config), + )?; + + println!("Search index '{}' created successfully", index_name); + + // Store in registry + let mut indexes = server.search_indexes.write().unwrap(); + indexes.insert(index_name, Arc::new(search_index)); + + Ok(Protocol::SimpleString("OK".to_string())) +} + +pub async fn ft_add_cmd( + server: &Server, + index_name: String, + doc_id: String, + _score: f64, + fields: HashMap, +) -> Result { + let indexes = server.search_indexes.read().unwrap(); + + let search_index = indexes + .get(&index_name) + .ok_or_else(|| DBError(format!("Index '{}' not found", index_name)))?; + + search_index.add_document_with_fields(&doc_id, fields)?; + + Ok(Protocol::SimpleString("OK".to_string())) +} + +pub async fn ft_search_cmd( + server: &Server, + index_name: String, + query: String, + filters: Vec<(String, String)>, + limit: Option, + offset: Option, + return_fields: Option>, +) -> Result { + let indexes = server.search_indexes.read().unwrap(); + + let search_index = indexes + .get(&index_name) + .ok_or_else(|| DBError(format!("Index '{}' not found", index_name)))?; + + // Convert filters to search filters + let search_filters = filters + .into_iter() + .map(|(field, value)| Filter { + field, + filter_type: FilterType::Equals(value), + }) + .collect(); + + let options = SearchOptions { + limit: limit.unwrap_or(10), + offset: offset.unwrap_or(0), + filters: search_filters, + sort_by: None, + return_fields, + highlight: false, + }; + + let results = search_index.search_with_options(&query, options)?; + + // Format results as Redis protocol + let mut response = Vec::new(); + + // First element is the total count + response.push(Protocol::SimpleString(results.total.to_string())); + + // Then each document + for doc in results.documents { + let mut doc_array = Vec::new(); + + // Add document ID if it exists + if let Some(id) = doc.fields.get("_id") { + doc_array.push(Protocol::BulkString(id.clone())); + } + + // Add score + doc_array.push(Protocol::BulkString(doc.score.to_string())); + + // Add fields as key-value pairs + for (field_name, field_value) in doc.fields { + if field_name != "_id" { + doc_array.push(Protocol::BulkString(field_name)); + doc_array.push(Protocol::BulkString(field_value)); + } + } + + response.push(Protocol::Array(doc_array)); + } + + Ok(Protocol::Array(response)) +} + +pub async fn ft_del_cmd( + server: &Server, + index_name: String, + doc_id: String, +) -> Result { + let indexes = server.search_indexes.read().unwrap(); + + let _search_index = indexes + .get(&index_name) + .ok_or_else(|| DBError(format!("Index '{}' not found", index_name)))?; + + // For now, return success + 
// In a full implementation, we'd need to add a delete method to TantivySearch + println!("Deleting document '{}' from index '{}'", doc_id, index_name); + + Ok(Protocol::SimpleString("1".to_string())) +} + +pub async fn ft_info_cmd(server: &Server, index_name: String) -> Result { + let indexes = server.search_indexes.read().unwrap(); + + let search_index = indexes + .get(&index_name) + .ok_or_else(|| DBError(format!("Index '{}' not found", index_name)))?; + + let info = search_index.get_info()?; + + // Format info as Redis protocol + let mut response = Vec::new(); + + response.push(Protocol::BulkString("index_name".to_string())); + response.push(Protocol::BulkString(info.name)); + + response.push(Protocol::BulkString("num_docs".to_string())); + response.push(Protocol::BulkString(info.num_docs.to_string())); + + response.push(Protocol::BulkString("num_fields".to_string())); + response.push(Protocol::BulkString(info.fields.len().to_string())); + + response.push(Protocol::BulkString("fields".to_string())); + let fields_str = info + .fields + .iter() + .map(|f| format!("{}:{}", f.name, f.field_type)) + .collect::>() + .join(", "); + response.push(Protocol::BulkString(fields_str)); + + Ok(Protocol::Array(response)) +} + +pub async fn ft_drop_cmd(server: &Server, index_name: String) -> Result { + let mut indexes = server.search_indexes.write().unwrap(); + + if indexes.remove(&index_name).is_some() { + // Also remove the index files from disk + let index_path = server.search_index_path().join(&index_name); + if index_path.exists() { + std::fs::remove_dir_all(index_path) + .map_err(|e| DBError(format!("Failed to remove index files: {}", e)))?; + } + Ok(Protocol::SimpleString("OK".to_string())) + } else { + Err(DBError(format!("Index '{}' not found", index_name))) + } +} diff --git a/src/server.rs b/src/server.rs index a6e43e2..af631f8 100644 --- a/src/server.rs +++ b/src/server.rs @@ -1,9 +1,10 @@ use core::str; use std::collections::HashMap; use std::sync::Arc; +use std::sync::RwLock; use tokio::io::AsyncReadExt; use tokio::io::AsyncWriteExt; -use tokio::sync::{Mutex, oneshot}; +use tokio::sync::{oneshot, Mutex}; use std::sync::atomic::{AtomicU64, Ordering}; @@ -14,10 +15,12 @@ use crate::protocol::Protocol; use crate::storage::Storage; use crate::storage_sled::SledStorage; use crate::storage_trait::StorageBackend; +use crate::tantivy_search::TantivySearch; #[derive(Clone)] pub struct Server { - pub db_cache: std::sync::Arc>>>, + pub db_cache: Arc>>>, + pub search_indexes: Arc>>>, pub option: options::DBOption, pub client_name: Option, pub selected_db: u64, // Changed from usize to u64 @@ -43,7 +46,8 @@ pub enum PopSide { impl Server { pub async fn new(option: options::DBOption) -> Self { Server { - db_cache: Arc::new(std::sync::RwLock::new(HashMap::new())), + db_cache: Arc::new(RwLock::new(HashMap::new())), + search_indexes: Arc::new(RwLock::new(HashMap::new())), option, client_name: None, selected_db: 0, @@ -56,54 +60,63 @@ impl Server { pub fn current_storage(&self) -> Result, DBError> { let mut cache = self.db_cache.write().unwrap(); - + if let Some(storage) = cache.get(&self.selected_db) { return Ok(storage.clone()); } - - + // Create new database file let db_file_path = std::path::PathBuf::from(self.option.dir.clone()) .join(format!("{}.db", self.selected_db)); - + // Ensure the directory exists before creating the database file if let Some(parent_dir) = db_file_path.parent() { std::fs::create_dir_all(parent_dir).map_err(|e| { - DBError(format!("Failed to create directory {}: {}", 
parent_dir.display(), e)) + DBError(format!( + "Failed to create directory {}: {}", + parent_dir.display(), + e + )) })?; } - + println!("Creating new db file: {}", db_file_path.display()); - + let storage: Arc = match self.option.backend { - options::BackendType::Redb => { - Arc::new(Storage::new( - db_file_path, - self.should_encrypt_db(self.selected_db), - self.option.encryption_key.as_deref() - )?) - } - options::BackendType::Sled => { - Arc::new(SledStorage::new( - db_file_path, - self.should_encrypt_db(self.selected_db), - self.option.encryption_key.as_deref() - )?) - } + options::BackendType::Redb => Arc::new(Storage::new( + db_file_path, + self.should_encrypt_db(self.selected_db), + self.option.encryption_key.as_deref(), + )?), + options::BackendType::Sled => Arc::new(SledStorage::new( + db_file_path, + self.should_encrypt_db(self.selected_db), + self.option.encryption_key.as_deref(), + )?), }; - + cache.insert(self.selected_db, storage.clone()); Ok(storage) } - + fn should_encrypt_db(&self, db_index: u64) -> bool { // DB 0-9 are non-encrypted, DB 10+ are encrypted self.option.encrypt && db_index >= 10 } + // Add method to get search index path + pub fn search_index_path(&self) -> std::path::PathBuf { + std::path::PathBuf::from(&self.option.dir).join("search_indexes") + } + // ----- BLPOP waiter helpers ----- - pub async fn register_waiter(&self, db_index: u64, key: &str, side: PopSide) -> (u64, oneshot::Receiver<(String, String)>) { + pub async fn register_waiter( + &self, + db_index: u64, + key: &str, + side: PopSide, + ) -> (u64, oneshot::Receiver<(String, String)>) { let id = self.waiter_seq.fetch_add(1, Ordering::Relaxed); let (tx, rx) = oneshot::channel::<(String, String)>(); @@ -179,10 +192,7 @@ impl Server { Ok(()) } - pub async fn handle( - &mut self, - mut stream: tokio::net::TcpStream, - ) -> Result<(), DBError> { + pub async fn handle(&mut self, mut stream: tokio::net::TcpStream) -> Result<(), DBError> { // Accumulate incoming bytes to handle partial RESP frames let mut acc = String::new(); let mut buf = vec![0u8; 8192]; @@ -219,7 +229,10 @@ impl Server { acc = remaining.to_string(); if self.option.debug { - println!("\x1b[34;1mgot command: {:?}, protocol: {:?}\x1b[0m", cmd, protocol); + println!( + "\x1b[34;1mgot command: {:?}, protocol: {:?}\x1b[0m", + cmd, protocol + ); } else { println!("got command: {:?}, protocol: {:?}", cmd, protocol); } diff --git a/src/storage/mod.rs b/src/storage/mod.rs index abc2cd5..c52d456 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -12,9 +12,9 @@ use crate::error::DBError; // Re-export modules mod storage_basic; +mod storage_extra; mod storage_hset; mod storage_lists; -mod storage_extra; // Re-export implementations // Note: These imports are used by the impl blocks in the submodules @@ -28,7 +28,8 @@ const STRINGS_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("string const HASHES_TABLE: TableDefinition<(&str, &str), &[u8]> = TableDefinition::new("hashes"); const LISTS_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("lists"); const STREAMS_META_TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("streams_meta"); -const STREAMS_DATA_TABLE: TableDefinition<(&str, &str), &[u8]> = TableDefinition::new("streams_data"); +const STREAMS_DATA_TABLE: TableDefinition<(&str, &str), &[u8]> = + TableDefinition::new("streams_data"); const ENCRYPTED_TABLE: TableDefinition<&str, u8> = TableDefinition::new("encrypted"); const EXPIRATION_TABLE: TableDefinition<&str, u64> = TableDefinition::new("expiration"); @@ 
-55,9 +56,13 @@ pub struct Storage { } impl Storage { - pub fn new(path: impl AsRef, should_encrypt: bool, master_key: Option<&str>) -> Result { + pub fn new( + path: impl AsRef, + should_encrypt: bool, + master_key: Option<&str>, + ) -> Result { let db = Database::create(path)?; - + // Create tables if they don't exist let write_txn = db.begin_write()?; { @@ -71,23 +76,28 @@ impl Storage { let _ = write_txn.open_table(EXPIRATION_TABLE)?; } write_txn.commit()?; - + // Check if database was previously encrypted let read_txn = db.begin_read()?; let encrypted_table = read_txn.open_table(ENCRYPTED_TABLE)?; - let was_encrypted = encrypted_table.get("encrypted")?.map(|v| v.value() == 1).unwrap_or(false); + let was_encrypted = encrypted_table + .get("encrypted")? + .map(|v| v.value() == 1) + .unwrap_or(false); drop(read_txn); - + let crypto = if should_encrypt || was_encrypted { if let Some(key) = master_key { Some(CryptoFactory::new(key.as_bytes())) } else { - return Err(DBError("Encryption requested but no master key provided".to_string())); + return Err(DBError( + "Encryption requested but no master key provided".to_string(), + )); } } else { None }; - + // If we're enabling encryption for the first time, mark it if should_encrypt && !was_encrypted { let write_txn = db.begin_write()?; @@ -97,13 +107,10 @@ impl Storage { } write_txn.commit()?; } - - Ok(Storage { - db, - crypto, - }) + + Ok(Storage { db, crypto }) } - + pub fn is_encrypted(&self) -> bool { self.crypto.is_some() } @@ -116,7 +123,7 @@ impl Storage { Ok(data.to_vec()) } } - + fn decrypt_if_needed(&self, data: &[u8]) -> Result, DBError> { if let Some(crypto) = &self.crypto { Ok(crypto.decrypt(data)?) @@ -165,11 +172,22 @@ impl StorageBackend for Storage { self.get_key_type(key) } - fn scan(&self, cursor: u64, pattern: Option<&str>, count: Option) -> Result<(u64, Vec<(String, String)>), DBError> { + fn scan( + &self, + cursor: u64, + pattern: Option<&str>, + count: Option, + ) -> Result<(u64, Vec<(String, String)>), DBError> { self.scan(cursor, pattern, count) } - fn hscan(&self, key: &str, cursor: u64, pattern: Option<&str>, count: Option) -> Result<(u64, Vec<(String, String)>), DBError> { + fn hscan( + &self, + key: &str, + cursor: u64, + pattern: Option<&str>, + count: Option, + ) -> Result<(u64, Vec<(String, String)>), DBError> { self.hscan(key, cursor, pattern, count) } @@ -276,7 +294,7 @@ impl StorageBackend for Storage { fn is_encrypted(&self) -> bool { self.is_encrypted() } - + fn info(&self) -> Result, DBError> { self.info() } @@ -284,4 +302,4 @@ impl StorageBackend for Storage { fn clone_arc(&self) -> Arc { unimplemented!("Storage cloning not yet implemented for redb backend") } -} \ No newline at end of file +} diff --git a/src/storage/storage_basic.rs b/src/storage/storage_basic.rs index 1594b87..fbc7f15 100644 --- a/src/storage/storage_basic.rs +++ b/src/storage/storage_basic.rs @@ -1,6 +1,6 @@ -use redb::{ReadableTable}; -use crate::error::DBError; use super::*; +use crate::error::DBError; +use redb::ReadableTable; impl Storage { pub fn flushdb(&self) -> Result<(), DBError> { @@ -15,11 +15,17 @@ impl Storage { let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?; // inefficient, but there is no other way - let keys: Vec = types_table.iter()?.map(|item| item.unwrap().0.value().to_string()).collect(); + let keys: Vec = types_table + .iter()? 
+ .map(|item| item.unwrap().0.value().to_string()) + .collect(); for key in keys { types_table.remove(key.as_str())?; } - let keys: Vec = strings_table.iter()?.map(|item| item.unwrap().0.value().to_string()).collect(); + let keys: Vec = strings_table + .iter()? + .map(|item| item.unwrap().0.value().to_string()) + .collect(); for key in keys { strings_table.remove(key.as_str())?; } @@ -34,23 +40,35 @@ impl Storage { for (key, field) in keys { hashes_table.remove((key.as_str(), field.as_str()))?; } - let keys: Vec = lists_table.iter()?.map(|item| item.unwrap().0.value().to_string()).collect(); + let keys: Vec = lists_table + .iter()? + .map(|item| item.unwrap().0.value().to_string()) + .collect(); for key in keys { lists_table.remove(key.as_str())?; } - let keys: Vec = streams_meta_table.iter()?.map(|item| item.unwrap().0.value().to_string()).collect(); + let keys: Vec = streams_meta_table + .iter()? + .map(|item| item.unwrap().0.value().to_string()) + .collect(); for key in keys { streams_meta_table.remove(key.as_str())?; } - let keys: Vec<(String,String)> = streams_data_table.iter()?.map(|item| { - let binding = item.unwrap(); - let (key, field) = binding.0.value(); - (key.to_string(), field.to_string()) - }).collect(); + let keys: Vec<(String, String)> = streams_data_table + .iter()? + .map(|item| { + let binding = item.unwrap(); + let (key, field) = binding.0.value(); + (key.to_string(), field.to_string()) + }) + .collect(); for (key, field) in keys { streams_data_table.remove((key.as_str(), field.as_str()))?; } - let keys: Vec = expiration_table.iter()?.map(|item| item.unwrap().0.value().to_string()).collect(); + let keys: Vec = expiration_table + .iter()? + .map(|item| item.unwrap().0.value().to_string()) + .collect(); for key in keys { expiration_table.remove(key.as_str())?; } @@ -62,7 +80,7 @@ impl Storage { pub fn get_key_type(&self, key: &str) -> Result, DBError> { let read_txn = self.db.begin_read()?; let table = read_txn.open_table(TYPES_TABLE)?; - + // Before returning type, check for expiration if let Some(type_val) = table.get(key)? { if type_val.value() == "string" { @@ -83,7 +101,7 @@ impl Storage { // ✅ ENCRYPTION APPLIED: Value is encrypted/decrypted pub fn get(&self, key: &str) -> Result, DBError> { let read_txn = self.db.begin_read()?; - + let types_table = read_txn.open_table(TYPES_TABLE)?; match types_table.get(key)? { Some(type_val) if type_val.value() == "string" => { @@ -96,7 +114,7 @@ impl Storage { return Ok(None); } } - + // Get and decrypt value let strings_table = read_txn.open_table(STRINGS_TABLE)?; match strings_table.get(key)? 
{ @@ -115,21 +133,21 @@ impl Storage { // ✅ ENCRYPTION APPLIED: Value is encrypted before storage pub fn set(&self, key: String, value: String) -> Result<(), DBError> { let write_txn = self.db.begin_write()?; - + { let mut types_table = write_txn.open_table(TYPES_TABLE)?; types_table.insert(key.as_str(), "string")?; - + let mut strings_table = write_txn.open_table(STRINGS_TABLE)?; // Only encrypt the value, not expiration let encrypted = self.encrypt_if_needed(value.as_bytes())?; strings_table.insert(key.as_str(), encrypted.as_slice())?; - + // Remove any existing expiration since this is a regular SET let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?; expiration_table.remove(key.as_str())?; } - + write_txn.commit()?; Ok(()) } @@ -137,41 +155,42 @@ impl Storage { // ✅ ENCRYPTION APPLIED: Value is encrypted before storage pub fn setx(&self, key: String, value: String, expire_ms: u128) -> Result<(), DBError> { let write_txn = self.db.begin_write()?; - + { let mut types_table = write_txn.open_table(TYPES_TABLE)?; types_table.insert(key.as_str(), "string")?; - + let mut strings_table = write_txn.open_table(STRINGS_TABLE)?; // Only encrypt the value let encrypted = self.encrypt_if_needed(value.as_bytes())?; strings_table.insert(key.as_str(), encrypted.as_slice())?; - + // Store expiration separately (unencrypted) let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?; let expires_at = expire_ms + now_in_millis(); expiration_table.insert(key.as_str(), &(expires_at as u64))?; } - + write_txn.commit()?; Ok(()) } pub fn del(&self, key: String) -> Result<(), DBError> { let write_txn = self.db.begin_write()?; - + { let mut types_table = write_txn.open_table(TYPES_TABLE)?; let mut strings_table = write_txn.open_table(STRINGS_TABLE)?; - let mut hashes_table: redb::Table<(&str, &str), &[u8]> = write_txn.open_table(HASHES_TABLE)?; + let mut hashes_table: redb::Table<(&str, &str), &[u8]> = + write_txn.open_table(HASHES_TABLE)?; let mut lists_table = write_txn.open_table(LISTS_TABLE)?; - + // Remove from type table types_table.remove(key.as_str())?; - + // Remove from strings table strings_table.remove(key.as_str())?; - + // Remove all hash fields for this key let mut to_remove = Vec::new(); let mut iter = hashes_table.iter()?; @@ -183,19 +202,19 @@ impl Storage { } } drop(iter); - + for (hash_key, field) in to_remove { hashes_table.remove((hash_key.as_str(), field.as_str()))?; } // Remove from lists table lists_table.remove(key.as_str())?; - + // Also remove expiration let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?; expiration_table.remove(key.as_str())?; } - + write_txn.commit()?; Ok(()) } @@ -203,7 +222,7 @@ impl Storage { pub fn keys(&self, pattern: &str) -> Result, DBError> { let read_txn = self.db.begin_read()?; let table = read_txn.open_table(TYPES_TABLE)?; - + let mut keys = Vec::new(); let mut iter = table.iter()?; while let Some(entry) = iter.next() { @@ -212,7 +231,7 @@ impl Storage { keys.push(key); } } - + Ok(keys) } } @@ -242,4 +261,4 @@ impl Storage { } Ok(count) } -} \ No newline at end of file +} diff --git a/src/storage/storage_extra.rs b/src/storage/storage_extra.rs index d918b58..bc73641 100644 --- a/src/storage/storage_extra.rs +++ b/src/storage/storage_extra.rs @@ -1,24 +1,29 @@ -use redb::{ReadableTable}; -use crate::error::DBError; use super::*; +use crate::error::DBError; +use redb::ReadableTable; impl Storage { // ✅ ENCRYPTION APPLIED: Values are decrypted after retrieval - pub fn scan(&self, cursor: u64, pattern: Option<&str>, 
count: Option) -> Result<(u64, Vec<(String, String)>), DBError> { + pub fn scan( + &self, + cursor: u64, + pattern: Option<&str>, + count: Option, + ) -> Result<(u64, Vec<(String, String)>), DBError> { let read_txn = self.db.begin_read()?; let types_table = read_txn.open_table(TYPES_TABLE)?; let strings_table = read_txn.open_table(STRINGS_TABLE)?; - + let mut result = Vec::new(); let mut current_cursor = 0u64; let limit = count.unwrap_or(10) as usize; - + let mut iter = types_table.iter()?; while let Some(entry) = iter.next() { let entry = entry?; let key = entry.0.value().to_string(); let key_type = entry.1.value().to_string(); - + if current_cursor >= cursor { // Apply pattern matching if specified let matches = if let Some(pat) = pattern { @@ -26,7 +31,7 @@ impl Storage { } else { true }; - + if matches { // For scan, we return key-value pairs for string types if key_type == "string" { @@ -41,7 +46,7 @@ impl Storage { // For non-string types, just return the key with type as value result.push((key, key_type)); } - + if result.len() >= limit { break; } @@ -49,15 +54,19 @@ impl Storage { } current_cursor += 1; } - - let next_cursor = if result.len() < limit { 0 } else { current_cursor }; + + let next_cursor = if result.len() < limit { + 0 + } else { + current_cursor + }; Ok((next_cursor, result)) } pub fn ttl(&self, key: &str) -> Result { let read_txn = self.db.begin_read()?; let types_table = read_txn.open_table(TYPES_TABLE)?; - + match types_table.get(key)? { Some(type_val) if type_val.value() == "string" => { let expiration_table = read_txn.open_table(EXPIRATION_TABLE)?; @@ -75,14 +84,14 @@ impl Storage { } } Some(_) => Ok(-1), // Key exists but is not a string (no expiration support for other types) - None => Ok(-2), // Key does not exist + None => Ok(-2), // Key does not exist } } pub fn exists(&self, key: &str) -> Result { let read_txn = self.db.begin_read()?; let types_table = read_txn.open_table(TYPES_TABLE)?; - + match types_table.get(key)? { Some(type_val) if type_val.value() == "string" => { // Check if string key has expired @@ -95,7 +104,7 @@ impl Storage { Ok(true) } Some(_) => Ok(true), // Key exists and is not a string - None => Ok(false), // Key does not exist + None => Ok(false), // Key does not exist } } @@ -178,8 +187,12 @@ impl Storage { .unwrap_or(false); if is_string { let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?; - let expires_at_ms: u128 = if ts_secs <= 0 { 0 } else { (ts_secs as u128) * 1000 }; - expiration_table.insert(key, &((expires_at_ms as u64)))?; + let expires_at_ms: u128 = if ts_secs <= 0 { + 0 + } else { + (ts_secs as u128) * 1000 + }; + expiration_table.insert(key, &(expires_at_ms as u64))?; applied = true; } } @@ -201,7 +214,7 @@ impl Storage { if is_string { let mut expiration_table = write_txn.open_table(EXPIRATION_TABLE)?; let expires_at_ms: u128 = if ts_ms <= 0 { 0 } else { ts_ms as u128 }; - expiration_table.insert(key, &((expires_at_ms as u64)))?; + expiration_table.insert(key, &(expires_at_ms as u64))?; applied = true; } } @@ -223,21 +236,21 @@ pub fn glob_match(pattern: &str, text: &str) -> bool { if pattern == "*" { return true; } - + // Simple glob matching - supports * and ? 
wildcards let pattern_chars: Vec = pattern.chars().collect(); let text_chars: Vec = text.chars().collect(); - + fn match_recursive(pattern: &[char], text: &[char], pi: usize, ti: usize) -> bool { if pi >= pattern.len() { return ti >= text.len(); } - + if ti >= text.len() { // Check if remaining pattern is all '*' return pattern[pi..].iter().all(|&c| c == '*'); } - + match pattern[pi] { '*' => { // Try matching zero or more characters @@ -262,7 +275,7 @@ pub fn glob_match(pattern: &str, text: &str) -> bool { } } } - + match_recursive(&pattern_chars, &text_chars, 0, 0) } @@ -283,4 +296,4 @@ mod tests { assert!(glob_match("*test*", "this_is_a_test_string")); assert!(!glob_match("*test*", "this_is_a_string")); } -} \ No newline at end of file +} diff --git a/src/storage/storage_hset.rs b/src/storage/storage_hset.rs index dfe9394..9c6d230 100644 --- a/src/storage/storage_hset.rs +++ b/src/storage/storage_hset.rs @@ -1,44 +1,50 @@ -use redb::{ReadableTable}; -use crate::error::DBError; use super::*; +use crate::error::DBError; +use redb::ReadableTable; impl Storage { // ✅ ENCRYPTION APPLIED: Values are encrypted before storage pub fn hset(&self, key: &str, pairs: Vec<(String, String)>) -> Result { let write_txn = self.db.begin_write()?; let mut new_fields = 0i64; - + { let mut types_table = write_txn.open_table(TYPES_TABLE)?; let mut hashes_table = write_txn.open_table(HASHES_TABLE)?; - + let key_type = { let access_guard = types_table.get(key)?; access_guard.map(|v| v.value().to_string()) }; match key_type.as_deref() { - Some("hash") | None => { // Proceed if hash or new key + Some("hash") | None => { + // Proceed if hash or new key // Set the type to hash (only if new key or existing hash) types_table.insert(key, "hash")?; - + for (field, value) in pairs { // Check if field already exists let exists = hashes_table.get((key, field.as_str()))?.is_some(); - + // Encrypt the value before storing let encrypted = self.encrypt_if_needed(value.as_bytes())?; hashes_table.insert((key, field.as_str()), encrypted.as_slice())?; - + if !exists { new_fields += 1; } } } - Some(_) => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())), + Some(_) => { + return Err(DBError( + "WRONGTYPE Operation against a key holding the wrong kind of value" + .to_string(), + )) + } } } - + write_txn.commit()?; Ok(new_fields) } @@ -47,7 +53,7 @@ impl Storage { pub fn hget(&self, key: &str, field: &str) -> Result, DBError> { let read_txn = self.db.begin_read()?; let types_table = read_txn.open_table(TYPES_TABLE)?; - + let key_type = types_table.get(key)?.map(|v| v.value().to_string()); match key_type.as_deref() { @@ -62,7 +68,9 @@ impl Storage { None => Ok(None), } } - Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())), + Some(_) => Err(DBError( + "WRONGTYPE Operation against a key holding the wrong kind of value".to_string(), + )), None => Ok(None), } } @@ -80,7 +88,7 @@ impl Storage { Some("hash") => { let hashes_table = read_txn.open_table(HASHES_TABLE)?; let mut result = Vec::new(); - + let mut iter = hashes_table.iter()?; while let Some(entry) = iter.next() { let entry = entry?; @@ -91,10 +99,12 @@ impl Storage { result.push((field.to_string(), value)); } } - + Ok(result) } - Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())), + Some(_) => Err(DBError( + "WRONGTYPE Operation against a key holding the wrong kind of value".to_string(), + )), None => Ok(Vec::new()), } } @@ 
-102,24 +112,24 @@ impl Storage { pub fn hdel(&self, key: &str, fields: Vec) -> Result { let write_txn = self.db.begin_write()?; let mut deleted = 0i64; - + // First check if key exists and is a hash let key_type = { let types_table = write_txn.open_table(TYPES_TABLE)?; let access_guard = types_table.get(key)?; access_guard.map(|v| v.value().to_string()) }; - + match key_type.as_deref() { Some("hash") => { let mut hashes_table = write_txn.open_table(HASHES_TABLE)?; - + for field in fields { if hashes_table.remove((key, field.as_str()))?.is_some() { deleted += 1; } } - + // Check if hash is now empty and remove type if so let mut has_fields = false; let mut iter = hashes_table.iter()?; @@ -132,16 +142,20 @@ impl Storage { } } drop(iter); - + if !has_fields { let mut types_table = write_txn.open_table(TYPES_TABLE)?; types_table.remove(key)?; } } - Some(_) => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())), + Some(_) => { + return Err(DBError( + "WRONGTYPE Operation against a key holding the wrong kind of value".to_string(), + )) + } None => {} // Key does not exist, nothing to delete, return 0 deleted } - + write_txn.commit()?; Ok(deleted) } @@ -159,7 +173,9 @@ impl Storage { let hashes_table = read_txn.open_table(HASHES_TABLE)?; Ok(hashes_table.get((key, field))?.is_some()) } - Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())), + Some(_) => Err(DBError( + "WRONGTYPE Operation against a key holding the wrong kind of value".to_string(), + )), None => Ok(false), } } @@ -176,7 +192,7 @@ impl Storage { Some("hash") => { let hashes_table = read_txn.open_table(HASHES_TABLE)?; let mut result = Vec::new(); - + let mut iter = hashes_table.iter()?; while let Some(entry) = iter.next() { let entry = entry?; @@ -185,10 +201,12 @@ impl Storage { result.push(field.to_string()); } } - + Ok(result) } - Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())), + Some(_) => Err(DBError( + "WRONGTYPE Operation against a key holding the wrong kind of value".to_string(), + )), None => Ok(Vec::new()), } } @@ -206,7 +224,7 @@ impl Storage { Some("hash") => { let hashes_table = read_txn.open_table(HASHES_TABLE)?; let mut result = Vec::new(); - + let mut iter = hashes_table.iter()?; while let Some(entry) = iter.next() { let entry = entry?; @@ -217,10 +235,12 @@ impl Storage { result.push(value); } } - + Ok(result) } - Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())), + Some(_) => Err(DBError( + "WRONGTYPE Operation against a key holding the wrong kind of value".to_string(), + )), None => Ok(Vec::new()), } } @@ -237,7 +257,7 @@ impl Storage { Some("hash") => { let hashes_table = read_txn.open_table(HASHES_TABLE)?; let mut count = 0i64; - + let mut iter = hashes_table.iter()?; while let Some(entry) = iter.next() { let entry = entry?; @@ -246,10 +266,12 @@ impl Storage { count += 1; } } - + Ok(count) } - Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())), + Some(_) => Err(DBError( + "WRONGTYPE Operation against a key holding the wrong kind of value".to_string(), + )), None => Ok(0), } } @@ -267,7 +289,7 @@ impl Storage { Some("hash") => { let hashes_table = read_txn.open_table(HASHES_TABLE)?; let mut result = Vec::new(); - + for field in fields { match hashes_table.get((key, field.as_str()))? 
{ Some(data) => { @@ -278,10 +300,12 @@ impl Storage { None => result.push(None), } } - + Ok(result) } - Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())), + Some(_) => Err(DBError( + "WRONGTYPE Operation against a key holding the wrong kind of value".to_string(), + )), None => Ok(fields.into_iter().map(|_| None).collect()), } } @@ -290,39 +314,51 @@ impl Storage { pub fn hsetnx(&self, key: &str, field: &str, value: &str) -> Result { let write_txn = self.db.begin_write()?; let mut result = false; - + { let mut types_table = write_txn.open_table(TYPES_TABLE)?; let mut hashes_table = write_txn.open_table(HASHES_TABLE)?; - + let key_type = { let access_guard = types_table.get(key)?; access_guard.map(|v| v.value().to_string()) }; match key_type.as_deref() { - Some("hash") | None => { // Proceed if hash or new key + Some("hash") | None => { + // Proceed if hash or new key // Check if field already exists if hashes_table.get((key, field))?.is_none() { // Set the type to hash (only if new key or existing hash) types_table.insert(key, "hash")?; - + // Encrypt the value before storing let encrypted = self.encrypt_if_needed(value.as_bytes())?; hashes_table.insert((key, field), encrypted.as_slice())?; result = true; } } - Some(_) => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())), + Some(_) => { + return Err(DBError( + "WRONGTYPE Operation against a key holding the wrong kind of value" + .to_string(), + )) + } } } - + write_txn.commit()?; Ok(result) } // ✅ ENCRYPTION APPLIED: Values are decrypted after retrieval - pub fn hscan(&self, key: &str, cursor: u64, pattern: Option<&str>, count: Option) -> Result<(u64, Vec<(String, String)>), DBError> { + pub fn hscan( + &self, + key: &str, + cursor: u64, + pattern: Option<&str>, + count: Option, + ) -> Result<(u64, Vec<(String, String)>), DBError> { let read_txn = self.db.begin_read()?; let types_table = read_txn.open_table(TYPES_TABLE)?; let key_type = { @@ -336,28 +372,28 @@ impl Storage { let mut result = Vec::new(); let mut current_cursor = 0u64; let limit = count.unwrap_or(10) as usize; - + let mut iter = hashes_table.iter()?; while let Some(entry) = iter.next() { let entry = entry?; let (hash_key, field) = entry.0.value(); - + if hash_key == key { if current_cursor >= cursor { let field_str = field.to_string(); - + // Apply pattern matching if specified let matches = if let Some(pat) = pattern { super::storage_extra::glob_match(pat, &field_str) } else { true }; - + if matches { let decrypted = self.decrypt_if_needed(entry.1.value())?; let value = String::from_utf8(decrypted)?; result.push((field_str, value)); - + if result.len() >= limit { break; } @@ -366,12 +402,18 @@ impl Storage { current_cursor += 1; } } - - let next_cursor = if result.len() < limit { 0 } else { current_cursor }; + + let next_cursor = if result.len() < limit { + 0 + } else { + current_cursor + }; Ok((next_cursor, result)) } - Some(_) => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())), + Some(_) => Err(DBError( + "WRONGTYPE Operation against a key holding the wrong kind of value".to_string(), + )), None => Ok((0, Vec::new())), } } -} \ No newline at end of file +} diff --git a/src/storage/storage_lists.rs b/src/storage/storage_lists.rs index 93a2ef6..7bfb3e0 100644 --- a/src/storage/storage_lists.rs +++ b/src/storage/storage_lists.rs @@ -1,20 +1,20 @@ -use redb::{ReadableTable}; -use crate::error::DBError; use super::*; +use 
crate::error::DBError; +use redb::ReadableTable; impl Storage { // ✅ ENCRYPTION APPLIED: Elements are encrypted before storage pub fn lpush(&self, key: &str, elements: Vec) -> Result { let write_txn = self.db.begin_write()?; let mut _length = 0i64; - + { let mut types_table = write_txn.open_table(TYPES_TABLE)?; let mut lists_table = write_txn.open_table(LISTS_TABLE)?; - + // Set the type to list types_table.insert(key, "list")?; - + // Get current list or create empty one let mut list: Vec = match lists_table.get(key)? { Some(data) => { @@ -23,20 +23,20 @@ impl Storage { } None => Vec::new(), }; - + // Add elements to the front (left) for element in elements.into_iter() { list.insert(0, element); } - + _length = list.len() as i64; - + // Encrypt and store the updated list let serialized = serde_json::to_vec(&list)?; let encrypted = self.encrypt_if_needed(&serialized)?; lists_table.insert(key, encrypted.as_slice())?; } - + write_txn.commit()?; Ok(_length) } @@ -45,14 +45,14 @@ impl Storage { pub fn rpush(&self, key: &str, elements: Vec) -> Result { let write_txn = self.db.begin_write()?; let mut _length = 0i64; - + { let mut types_table = write_txn.open_table(TYPES_TABLE)?; let mut lists_table = write_txn.open_table(LISTS_TABLE)?; - + // Set the type to list types_table.insert(key, "list")?; - + // Get current list or create empty one let mut list: Vec = match lists_table.get(key)? { Some(data) => { @@ -61,17 +61,17 @@ impl Storage { } None => Vec::new(), }; - + // Add elements to the end (right) list.extend(elements); _length = list.len() as i64; - + // Encrypt and store the updated list let serialized = serde_json::to_vec(&list)?; let encrypted = self.encrypt_if_needed(&serialized)?; lists_table.insert(key, encrypted.as_slice())?; } - + write_txn.commit()?; Ok(_length) } @@ -80,12 +80,12 @@ impl Storage { pub fn lpop(&self, key: &str, count: u64) -> Result, DBError> { let write_txn = self.db.begin_write()?; let mut result = Vec::new(); - + // First check if key exists and is a list, and get the data let list_data = { let types_table = write_txn.open_table(TYPES_TABLE)?; let lists_table = write_txn.open_table(LISTS_TABLE)?; - + let result = match types_table.get(key)? { Some(type_val) if type_val.value() == "list" => { if let Some(data) = lists_table.get(key)? { @@ -100,7 +100,7 @@ impl Storage { }; result }; - + if let Some(mut list) = list_data { let pop_count = std::cmp::min(count as usize, list.len()); for _ in 0..pop_count { @@ -108,7 +108,7 @@ impl Storage { result.push(list.remove(0)); } } - + let mut lists_table = write_txn.open_table(LISTS_TABLE)?; if list.is_empty() { // Remove the key if list is empty @@ -122,7 +122,7 @@ impl Storage { lists_table.insert(key, encrypted.as_slice())?; } } - + write_txn.commit()?; Ok(result) } @@ -131,12 +131,12 @@ impl Storage { pub fn rpop(&self, key: &str, count: u64) -> Result, DBError> { let write_txn = self.db.begin_write()?; let mut result = Vec::new(); - + // First check if key exists and is a list, and get the data let list_data = { let types_table = write_txn.open_table(TYPES_TABLE)?; let lists_table = write_txn.open_table(LISTS_TABLE)?; - + let result = match types_table.get(key)? { Some(type_val) if type_val.value() == "list" => { if let Some(data) = lists_table.get(key)? 
{ @@ -151,7 +151,7 @@ impl Storage { }; result }; - + if let Some(mut list) = list_data { let pop_count = std::cmp::min(count as usize, list.len()); for _ in 0..pop_count { @@ -159,7 +159,7 @@ impl Storage { result.push(list.pop().unwrap()); } } - + let mut lists_table = write_txn.open_table(LISTS_TABLE)?; if list.is_empty() { // Remove the key if list is empty @@ -173,7 +173,7 @@ impl Storage { lists_table.insert(key, encrypted.as_slice())?; } } - + write_txn.commit()?; Ok(result) } @@ -181,7 +181,7 @@ impl Storage { pub fn llen(&self, key: &str) -> Result { let read_txn = self.db.begin_read()?; let types_table = read_txn.open_table(TYPES_TABLE)?; - + match types_table.get(key)? { Some(type_val) if type_val.value() == "list" => { let lists_table = read_txn.open_table(LISTS_TABLE)?; @@ -202,7 +202,7 @@ impl Storage { pub fn lindex(&self, key: &str, index: i64) -> Result, DBError> { let read_txn = self.db.begin_read()?; let types_table = read_txn.open_table(TYPES_TABLE)?; - + match types_table.get(key)? { Some(type_val) if type_val.value() == "list" => { let lists_table = read_txn.open_table(LISTS_TABLE)?; @@ -210,13 +210,13 @@ impl Storage { Some(data) => { let decrypted = self.decrypt_if_needed(data.value())?; let list: Vec = serde_json::from_slice(&decrypted)?; - + let actual_index = if index < 0 { list.len() as i64 + index } else { index }; - + if actual_index >= 0 && (actual_index as usize) < list.len() { Ok(Some(list[actual_index as usize].clone())) } else { @@ -234,7 +234,7 @@ impl Storage { pub fn lrange(&self, key: &str, start: i64, stop: i64) -> Result, DBError> { let read_txn = self.db.begin_read()?; let types_table = read_txn.open_table(TYPES_TABLE)?; - + match types_table.get(key)? { Some(type_val) if type_val.value() == "list" => { let lists_table = read_txn.open_table(LISTS_TABLE)?; @@ -242,22 +242,30 @@ impl Storage { Some(data) => { let decrypted = self.decrypt_if_needed(data.value())?; let list: Vec = serde_json::from_slice(&decrypted)?; - + if list.is_empty() { return Ok(Vec::new()); } - + let len = list.len() as i64; - let start_idx = if start < 0 { std::cmp::max(0, len + start) } else { std::cmp::min(start, len) }; - let stop_idx = if stop < 0 { std::cmp::max(-1, len + stop) } else { std::cmp::min(stop, len - 1) }; - + let start_idx = if start < 0 { + std::cmp::max(0, len + start) + } else { + std::cmp::min(start, len) + }; + let stop_idx = if stop < 0 { + std::cmp::max(-1, len + stop) + } else { + std::cmp::min(stop, len - 1) + }; + if start_idx > stop_idx || start_idx >= len { return Ok(Vec::new()); } - + let start_usize = start_idx as usize; let stop_usize = (stop_idx + 1) as usize; - + Ok(list[start_usize..std::cmp::min(stop_usize, list.len())].to_vec()) } None => Ok(Vec::new()), @@ -270,12 +278,12 @@ impl Storage { // ✅ ENCRYPTION APPLIED: Elements are decrypted after retrieval and encrypted before storage pub fn ltrim(&self, key: &str, start: i64, stop: i64) -> Result<(), DBError> { let write_txn = self.db.begin_write()?; - + // First check if key exists and is a list, and get the data let list_data = { let types_table = write_txn.open_table(TYPES_TABLE)?; let lists_table = write_txn.open_table(LISTS_TABLE)?; - + let result = match types_table.get(key)? { Some(type_val) if type_val.value() == "list" => { if let Some(data) = lists_table.get(key)? 
{ @@ -290,17 +298,25 @@ impl Storage { }; result }; - + if let Some(list) = list_data { if list.is_empty() { write_txn.commit()?; return Ok(()); } - + let len = list.len() as i64; - let start_idx = if start < 0 { std::cmp::max(0, len + start) } else { std::cmp::min(start, len) }; - let stop_idx = if stop < 0 { std::cmp::max(-1, len + stop) } else { std::cmp::min(stop, len - 1) }; - + let start_idx = if start < 0 { + std::cmp::max(0, len + start) + } else { + std::cmp::min(start, len) + }; + let stop_idx = if stop < 0 { + std::cmp::max(-1, len + stop) + } else { + std::cmp::min(stop, len - 1) + }; + let mut lists_table = write_txn.open_table(LISTS_TABLE)?; if start_idx > stop_idx || start_idx >= len { // Remove the entire list @@ -311,7 +327,7 @@ impl Storage { let start_usize = start_idx as usize; let stop_usize = (stop_idx + 1) as usize; let trimmed = list[start_usize..std::cmp::min(stop_usize, list.len())].to_vec(); - + if trimmed.is_empty() { lists_table.remove(key)?; let mut types_table = write_txn.open_table(TYPES_TABLE)?; @@ -324,7 +340,7 @@ impl Storage { } } } - + write_txn.commit()?; Ok(()) } @@ -333,12 +349,12 @@ impl Storage { pub fn lrem(&self, key: &str, count: i64, element: &str) -> Result { let write_txn = self.db.begin_write()?; let mut removed = 0i64; - + // First check if key exists and is a list, and get the data let list_data = { let types_table = write_txn.open_table(TYPES_TABLE)?; let lists_table = write_txn.open_table(LISTS_TABLE)?; - + let result = match types_table.get(key)? { Some(type_val) if type_val.value() == "list" => { if let Some(data) = lists_table.get(key)? { @@ -353,7 +369,7 @@ impl Storage { }; result }; - + if let Some(mut list) = list_data { if count == 0 { // Remove all occurrences @@ -383,7 +399,7 @@ impl Storage { } } } - + let mut lists_table = write_txn.open_table(LISTS_TABLE)?; if list.is_empty() { lists_table.remove(key)?; @@ -396,8 +412,8 @@ impl Storage { lists_table.insert(key, encrypted.as_slice())?; } } - + write_txn.commit()?; Ok(removed) } -} \ No newline at end of file +} diff --git a/src/storage_sled/mod.rs b/src/storage_sled/mod.rs index ec22b88..d5514d7 100644 --- a/src/storage_sled/mod.rs +++ b/src/storage_sled/mod.rs @@ -1,12 +1,12 @@ // src/storage_sled/mod.rs -use std::path::Path; -use std::sync::Arc; -use std::collections::HashMap; -use std::time::{SystemTime, UNIX_EPOCH}; -use serde::{Deserialize, Serialize}; +use crate::crypto::CryptoFactory; use crate::error::DBError; use crate::storage_trait::StorageBackend; -use crate::crypto::CryptoFactory; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::path::Path; +use std::sync::Arc; +use std::time::{SystemTime, UNIX_EPOCH}; #[derive(Serialize, Deserialize, Debug, Clone)] enum ValueType { @@ -28,44 +28,56 @@ pub struct SledStorage { } impl SledStorage { - pub fn new(path: impl AsRef, should_encrypt: bool, master_key: Option<&str>) -> Result { + pub fn new( + path: impl AsRef, + should_encrypt: bool, + master_key: Option<&str>, + ) -> Result { let db = sled::open(path).map_err(|e| DBError(format!("Failed to open sled: {}", e)))?; - let types = db.open_tree("types").map_err(|e| DBError(format!("Failed to open types tree: {}", e)))?; - + let types = db + .open_tree("types") + .map_err(|e| DBError(format!("Failed to open types tree: {}", e)))?; + // Check if database was previously encrypted - let encrypted_tree = db.open_tree("encrypted").map_err(|e| DBError(e.to_string()))?; - let was_encrypted = encrypted_tree.get("encrypted") + let encrypted_tree = db + 
.open_tree("encrypted") + .map_err(|e| DBError(e.to_string()))?; + let was_encrypted = encrypted_tree + .get("encrypted") .map_err(|e| DBError(e.to_string()))? .map(|v| v[0] == 1) .unwrap_or(false); - + let crypto = if should_encrypt || was_encrypted { if let Some(key) = master_key { Some(CryptoFactory::new(key.as_bytes())) } else { - return Err(DBError("Encryption requested but no master key provided".to_string())); + return Err(DBError( + "Encryption requested but no master key provided".to_string(), + )); } } else { None }; - + // Mark database as encrypted if enabling encryption if should_encrypt && !was_encrypted { - encrypted_tree.insert("encrypted", &[1u8]) + encrypted_tree + .insert("encrypted", &[1u8]) .map_err(|e| DBError(e.to_string()))?; encrypted_tree.flush().map_err(|e| DBError(e.to_string()))?; } - + Ok(SledStorage { db, types, crypto }) } - + fn now_millis() -> u128 { SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() .as_millis() } - + fn encrypt_if_needed(&self, data: &[u8]) -> Result, DBError> { if let Some(crypto) = &self.crypto { Ok(crypto.encrypt(data)) @@ -73,7 +85,7 @@ impl SledStorage { Ok(data.to_vec()) } } - + fn decrypt_if_needed(&self, data: &[u8]) -> Result, DBError> { if let Some(crypto) = &self.crypto { Ok(crypto.decrypt(data)?) @@ -81,14 +93,14 @@ impl SledStorage { Ok(data.to_vec()) } } - + fn get_storage_value(&self, key: &str) -> Result, DBError> { match self.db.get(key).map_err(|e| DBError(e.to_string()))? { Some(encrypted_data) => { let decrypted = self.decrypt_if_needed(&encrypted_data)?; let storage_val: StorageValue = bincode::deserialize(&decrypted) .map_err(|e| DBError(format!("Deserialization error: {}", e)))?; - + // Check expiration if let Some(expires_at) = storage_val.expires_at { if Self::now_millis() > expires_at { @@ -98,47 +110,51 @@ impl SledStorage { return Ok(None); } } - + Ok(Some(storage_val)) } - None => Ok(None) + None => Ok(None), } } - + fn set_storage_value(&self, key: &str, storage_val: StorageValue) -> Result<(), DBError> { let data = bincode::serialize(&storage_val) .map_err(|e| DBError(format!("Serialization error: {}", e)))?; let encrypted = self.encrypt_if_needed(&data)?; - self.db.insert(key, encrypted).map_err(|e| DBError(e.to_string()))?; - + self.db + .insert(key, encrypted) + .map_err(|e| DBError(e.to_string()))?; + // Store type info (unencrypted for efficiency) let type_str = match &storage_val.value { ValueType::String(_) => "string", ValueType::Hash(_) => "hash", ValueType::List(_) => "list", }; - self.types.insert(key, type_str.as_bytes()).map_err(|e| DBError(e.to_string()))?; - + self.types + .insert(key, type_str.as_bytes()) + .map_err(|e| DBError(e.to_string()))?; + Ok(()) } - + fn glob_match(pattern: &str, text: &str) -> bool { if pattern == "*" { return true; } - + let pattern_chars: Vec = pattern.chars().collect(); let text_chars: Vec = text.chars().collect(); - + fn match_recursive(pattern: &[char], text: &[char], pi: usize, ti: usize) -> bool { if pi >= pattern.len() { return ti >= text.len(); } - + if ti >= text.len() { return pattern[pi..].iter().all(|&c| c == '*'); } - + match pattern[pi] { '*' => { for i in ti..=text.len() { @@ -158,7 +174,7 @@ impl SledStorage { } } } - + match_recursive(&pattern_chars, &text_chars, 0, 0) } } @@ -168,12 +184,12 @@ impl StorageBackend for SledStorage { match self.get_storage_value(key)? 
{ Some(storage_val) => match storage_val.value { ValueType::String(s) => Ok(Some(s)), - _ => Ok(None) - } - None => Ok(None) + _ => Ok(None), + }, + None => Ok(None), } } - + fn set(&self, key: String, value: String) -> Result<(), DBError> { let storage_val = StorageValue { value: ValueType::String(value), @@ -183,7 +199,7 @@ impl StorageBackend for SledStorage { self.db.flush().map_err(|e| DBError(e.to_string()))?; Ok(()) } - + fn setx(&self, key: String, value: String, expire_ms: u128) -> Result<(), DBError> { let storage_val = StorageValue { value: ValueType::String(value), @@ -193,25 +209,27 @@ impl StorageBackend for SledStorage { self.db.flush().map_err(|e| DBError(e.to_string()))?; Ok(()) } - + fn del(&self, key: String) -> Result<(), DBError> { self.db.remove(&key).map_err(|e| DBError(e.to_string()))?; - self.types.remove(&key).map_err(|e| DBError(e.to_string()))?; + self.types + .remove(&key) + .map_err(|e| DBError(e.to_string()))?; self.db.flush().map_err(|e| DBError(e.to_string()))?; Ok(()) } - + fn exists(&self, key: &str) -> Result { // Check with expiration Ok(self.get_storage_value(key)?.is_some()) } - + fn keys(&self, pattern: &str) -> Result, DBError> { let mut keys = Vec::new(); for item in self.types.iter() { let (key_bytes, _) = item.map_err(|e| DBError(e.to_string()))?; let key = String::from_utf8_lossy(&key_bytes).to_string(); - + // Check if key is expired if self.get_storage_value(&key)?.is_some() { if Self::glob_match(pattern, &key) { @@ -221,24 +239,29 @@ impl StorageBackend for SledStorage { } Ok(keys) } - - fn scan(&self, cursor: u64, pattern: Option<&str>, count: Option) -> Result<(u64, Vec<(String, String)>), DBError> { + + fn scan( + &self, + cursor: u64, + pattern: Option<&str>, + count: Option, + ) -> Result<(u64, Vec<(String, String)>), DBError> { let mut result = Vec::new(); let mut current_cursor = 0u64; let limit = count.unwrap_or(10) as usize; - + for item in self.types.iter() { if current_cursor >= cursor { let (key_bytes, type_bytes) = item.map_err(|e| DBError(e.to_string()))?; let key = String::from_utf8_lossy(&key_bytes).to_string(); - + // Check pattern match let matches = if let Some(pat) = pattern { Self::glob_match(pat, &key) } else { true }; - + if matches { // Check if key is expired and get value if let Some(storage_val) = self.get_storage_value(&key)? { @@ -247,7 +270,7 @@ impl StorageBackend for SledStorage { _ => String::from_utf8_lossy(&type_bytes).to_string(), }; result.push((key, value)); - + if result.len() >= limit { current_cursor += 1; break; @@ -257,11 +280,15 @@ impl StorageBackend for SledStorage { } current_cursor += 1; } - - let next_cursor = if result.len() < limit { 0 } else { current_cursor }; + + let next_cursor = if result.len() < limit { + 0 + } else { + current_cursor + }; Ok((next_cursor, result)) } - + fn dbsize(&self) -> Result { let mut count = 0i64; for item in self.types.iter() { @@ -273,38 +300,42 @@ impl StorageBackend for SledStorage { } Ok(count) } - + fn flushdb(&self) -> Result<(), DBError> { self.db.clear().map_err(|e| DBError(e.to_string()))?; self.types.clear().map_err(|e| DBError(e.to_string()))?; self.db.flush().map_err(|e| DBError(e.to_string()))?; Ok(()) } - + fn get_key_type(&self, key: &str) -> Result, DBError> { // First check if key exists (handles expiration) if self.get_storage_value(key)?.is_some() { match self.types.get(key).map_err(|e| DBError(e.to_string()))? 
{ Some(data) => Ok(Some(String::from_utf8_lossy(&data).to_string())), - None => Ok(None) + None => Ok(None), } } else { Ok(None) } } - + // Hash operations fn hset(&self, key: &str, pairs: Vec<(String, String)>) -> Result { let mut storage_val = self.get_storage_value(key)?.unwrap_or(StorageValue { value: ValueType::Hash(HashMap::new()), expires_at: None, }); - + let hash = match &mut storage_val.value { ValueType::Hash(h) => h, - _ => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())), + _ => { + return Err(DBError( + "WRONGTYPE Operation against a key holding the wrong kind of value".to_string(), + )) + } }; - + let mut new_fields = 0i64; for (field, value) in pairs { if !hash.contains_key(&field) { @@ -312,40 +343,46 @@ impl StorageBackend for SledStorage { } hash.insert(field, value); } - + self.set_storage_value(key, storage_val)?; self.db.flush().map_err(|e| DBError(e.to_string()))?; Ok(new_fields) } - + fn hget(&self, key: &str, field: &str) -> Result, DBError> { match self.get_storage_value(key)? { Some(storage_val) => match storage_val.value { ValueType::Hash(h) => Ok(h.get(field).cloned()), - _ => Ok(None) - } - None => Ok(None) + _ => Ok(None), + }, + None => Ok(None), } } - + fn hgetall(&self, key: &str) -> Result, DBError> { match self.get_storage_value(key)? { Some(storage_val) => match storage_val.value { ValueType::Hash(h) => Ok(h.into_iter().collect()), - _ => Ok(Vec::new()) - } - None => Ok(Vec::new()) + _ => Ok(Vec::new()), + }, + None => Ok(Vec::new()), } } - - fn hscan(&self, key: &str, cursor: u64, pattern: Option<&str>, count: Option) -> Result<(u64, Vec<(String, String)>), DBError> { + + fn hscan( + &self, + key: &str, + cursor: u64, + pattern: Option<&str>, + count: Option, + ) -> Result<(u64, Vec<(String, String)>), DBError> { match self.get_storage_value(key)? { Some(storage_val) => match storage_val.value { ValueType::Hash(h) => { let mut result = Vec::new(); let mut current_cursor = 0u64; let limit = count.unwrap_or(10) as usize; - + for (field, value) in h.iter() { if current_cursor >= cursor { let matches = if let Some(pat) = pattern { @@ -353,7 +390,7 @@ impl StorageBackend for SledStorage { } else { true }; - + if matches { result.push((field.clone(), value.clone())); if result.len() >= limit { @@ -364,107 +401,115 @@ impl StorageBackend for SledStorage { } current_cursor += 1; } - - let next_cursor = if result.len() < limit { 0 } else { current_cursor }; + + let next_cursor = if result.len() < limit { + 0 + } else { + current_cursor + }; Ok((next_cursor, result)) } - _ => Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())) - } - None => Ok((0, Vec::new())) + _ => Err(DBError( + "WRONGTYPE Operation against a key holding the wrong kind of value".to_string(), + )), + }, + None => Ok((0, Vec::new())), } } - + fn hdel(&self, key: &str, fields: Vec) -> Result { let mut storage_val = match self.get_storage_value(key)? { Some(sv) => sv, - None => return Ok(0) + None => return Ok(0), }; - + let hash = match &mut storage_val.value { ValueType::Hash(h) => h, - _ => return Ok(0) + _ => return Ok(0), }; - + let mut deleted = 0i64; for field in fields { if hash.remove(&field).is_some() { deleted += 1; } } - + if hash.is_empty() { self.del(key.to_string())?; } else { self.set_storage_value(key, storage_val)?; self.db.flush().map_err(|e| DBError(e.to_string()))?; } - + Ok(deleted) } - + fn hexists(&self, key: &str, field: &str) -> Result { match self.get_storage_value(key)? 
{ Some(storage_val) => match storage_val.value { ValueType::Hash(h) => Ok(h.contains_key(field)), - _ => Ok(false) - } - None => Ok(false) + _ => Ok(false), + }, + None => Ok(false), } } - + fn hkeys(&self, key: &str) -> Result, DBError> { match self.get_storage_value(key)? { Some(storage_val) => match storage_val.value { ValueType::Hash(h) => Ok(h.keys().cloned().collect()), - _ => Ok(Vec::new()) - } - None => Ok(Vec::new()) + _ => Ok(Vec::new()), + }, + None => Ok(Vec::new()), } } - + fn hvals(&self, key: &str) -> Result, DBError> { match self.get_storage_value(key)? { Some(storage_val) => match storage_val.value { ValueType::Hash(h) => Ok(h.values().cloned().collect()), - _ => Ok(Vec::new()) - } - None => Ok(Vec::new()) + _ => Ok(Vec::new()), + }, + None => Ok(Vec::new()), } } - + fn hlen(&self, key: &str) -> Result { match self.get_storage_value(key)? { Some(storage_val) => match storage_val.value { ValueType::Hash(h) => Ok(h.len() as i64), - _ => Ok(0) - } - None => Ok(0) + _ => Ok(0), + }, + None => Ok(0), } } - + fn hmget(&self, key: &str, fields: Vec) -> Result>, DBError> { match self.get_storage_value(key)? { Some(storage_val) => match storage_val.value { - ValueType::Hash(h) => { - Ok(fields.into_iter().map(|f| h.get(&f).cloned()).collect()) - } - _ => Ok(fields.into_iter().map(|_| None).collect()) - } - None => Ok(fields.into_iter().map(|_| None).collect()) + ValueType::Hash(h) => Ok(fields.into_iter().map(|f| h.get(&f).cloned()).collect()), + _ => Ok(fields.into_iter().map(|_| None).collect()), + }, + None => Ok(fields.into_iter().map(|_| None).collect()), } } - + fn hsetnx(&self, key: &str, field: &str, value: &str) -> Result { let mut storage_val = self.get_storage_value(key)?.unwrap_or(StorageValue { value: ValueType::Hash(HashMap::new()), expires_at: None, }); - + let hash = match &mut storage_val.value { ValueType::Hash(h) => h, - _ => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())), + _ => { + return Err(DBError( + "WRONGTYPE Operation against a key holding the wrong kind of value".to_string(), + )) + } }; - + if hash.contains_key(field) { Ok(false) } else { @@ -474,58 +519,66 @@ impl StorageBackend for SledStorage { Ok(true) } } - + // List operations fn lpush(&self, key: &str, elements: Vec) -> Result { let mut storage_val = self.get_storage_value(key)?.unwrap_or(StorageValue { value: ValueType::List(Vec::new()), expires_at: None, }); - + let list = match &mut storage_val.value { ValueType::List(l) => l, - _ => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())), + _ => { + return Err(DBError( + "WRONGTYPE Operation against a key holding the wrong kind of value".to_string(), + )) + } }; - + for element in elements.into_iter().rev() { list.insert(0, element); } - + let len = list.len() as i64; self.set_storage_value(key, storage_val)?; self.db.flush().map_err(|e| DBError(e.to_string()))?; Ok(len) } - + fn rpush(&self, key: &str, elements: Vec) -> Result { let mut storage_val = self.get_storage_value(key)?.unwrap_or(StorageValue { value: ValueType::List(Vec::new()), expires_at: None, }); - + let list = match &mut storage_val.value { ValueType::List(l) => l, - _ => return Err(DBError("WRONGTYPE Operation against a key holding the wrong kind of value".to_string())), + _ => { + return Err(DBError( + "WRONGTYPE Operation against a key holding the wrong kind of value".to_string(), + )) + } }; - + list.extend(elements); let len = list.len() as i64; self.set_storage_value(key, 
storage_val)?; self.db.flush().map_err(|e| DBError(e.to_string()))?; Ok(len) } - + fn lpop(&self, key: &str, count: u64) -> Result, DBError> { let mut storage_val = match self.get_storage_value(key)? { Some(sv) => sv, - None => return Ok(Vec::new()) + None => return Ok(Vec::new()), }; - + let list = match &mut storage_val.value { ValueType::List(l) => l, - _ => return Ok(Vec::new()) + _ => return Ok(Vec::new()), }; - + let mut result = Vec::new(); for _ in 0..count.min(list.len() as u64) { if let Some(elem) = list.first() { @@ -533,55 +586,55 @@ impl StorageBackend for SledStorage { list.remove(0); } } - + if list.is_empty() { self.del(key.to_string())?; } else { self.set_storage_value(key, storage_val)?; self.db.flush().map_err(|e| DBError(e.to_string()))?; } - + Ok(result) } - + fn rpop(&self, key: &str, count: u64) -> Result, DBError> { let mut storage_val = match self.get_storage_value(key)? { Some(sv) => sv, - None => return Ok(Vec::new()) + None => return Ok(Vec::new()), }; - + let list = match &mut storage_val.value { ValueType::List(l) => l, - _ => return Ok(Vec::new()) + _ => return Ok(Vec::new()), }; - + let mut result = Vec::new(); for _ in 0..count.min(list.len() as u64) { if let Some(elem) = list.pop() { result.push(elem); } } - + if list.is_empty() { self.del(key.to_string())?; } else { self.set_storage_value(key, storage_val)?; self.db.flush().map_err(|e| DBError(e.to_string()))?; } - + Ok(result) } - + fn llen(&self, key: &str) -> Result { match self.get_storage_value(key)? { Some(storage_val) => match storage_val.value { ValueType::List(l) => Ok(l.len() as i64), - _ => Ok(0) - } - None => Ok(0) + _ => Ok(0), + }, + None => Ok(0), } } - + fn lindex(&self, key: &str, index: i64) -> Result, DBError> { match self.get_storage_value(key)? { Some(storage_val) => match storage_val.value { @@ -591,19 +644,19 @@ impl StorageBackend for SledStorage { } else { index }; - + if actual_index >= 0 && (actual_index as usize) < list.len() { Ok(Some(list[actual_index as usize].clone())) } else { Ok(None) } } - _ => Ok(None) - } - None => Ok(None) + _ => Ok(None), + }, + None => Ok(None), } } - + fn lrange(&self, key: &str, start: i64, stop: i64) -> Result, DBError> { match self.get_storage_value(key)? { Some(storage_val) => match storage_val.value { @@ -611,68 +664,68 @@ impl StorageBackend for SledStorage { if list.is_empty() { return Ok(Vec::new()); } - + let len = list.len() as i64; - let start_idx = if start < 0 { - std::cmp::max(0, len + start) - } else { - std::cmp::min(start, len) + let start_idx = if start < 0 { + std::cmp::max(0, len + start) + } else { + std::cmp::min(start, len) }; - let stop_idx = if stop < 0 { - std::cmp::max(-1, len + stop) - } else { - std::cmp::min(stop, len - 1) + let stop_idx = if stop < 0 { + std::cmp::max(-1, len + stop) + } else { + std::cmp::min(stop, len - 1) }; - + if start_idx > stop_idx || start_idx >= len { return Ok(Vec::new()); } - + let start_usize = start_idx as usize; let stop_usize = (stop_idx + 1) as usize; - + Ok(list[start_usize..std::cmp::min(stop_usize, list.len())].to_vec()) } - _ => Ok(Vec::new()) - } - None => Ok(Vec::new()) + _ => Ok(Vec::new()), + }, + None => Ok(Vec::new()), } } - + fn ltrim(&self, key: &str, start: i64, stop: i64) -> Result<(), DBError> { let mut storage_val = match self.get_storage_value(key)? 
{ Some(sv) => sv, - None => return Ok(()) + None => return Ok(()), }; - + let list = match &mut storage_val.value { ValueType::List(l) => l, - _ => return Ok(()) + _ => return Ok(()), }; - + if list.is_empty() { return Ok(()); } - + let len = list.len() as i64; - let start_idx = if start < 0 { - std::cmp::max(0, len + start) - } else { - std::cmp::min(start, len) + let start_idx = if start < 0 { + std::cmp::max(0, len + start) + } else { + std::cmp::min(start, len) }; - let stop_idx = if stop < 0 { - std::cmp::max(-1, len + stop) - } else { - std::cmp::min(stop, len - 1) + let stop_idx = if stop < 0 { + std::cmp::max(-1, len + stop) + } else { + std::cmp::min(stop, len - 1) }; - + if start_idx > stop_idx || start_idx >= len { self.del(key.to_string())?; } else { let start_usize = start_idx as usize; let stop_usize = (stop_idx + 1) as usize; *list = list[start_usize..std::cmp::min(stop_usize, list.len())].to_vec(); - + if list.is_empty() { self.del(key.to_string())?; } else { @@ -680,23 +733,23 @@ impl StorageBackend for SledStorage { self.db.flush().map_err(|e| DBError(e.to_string()))?; } } - + Ok(()) } - + fn lrem(&self, key: &str, count: i64, element: &str) -> Result { let mut storage_val = match self.get_storage_value(key)? { Some(sv) => sv, - None => return Ok(0) + None => return Ok(0), }; - + let list = match &mut storage_val.value { ValueType::List(l) => l, - _ => return Ok(0) + _ => return Ok(0), }; - + let mut removed = 0i64; - + if count == 0 { // Remove all occurrences let original_len = list.len(); @@ -725,17 +778,17 @@ impl StorageBackend for SledStorage { } } } - + if list.is_empty() { self.del(key.to_string())?; } else { self.set_storage_value(key, storage_val)?; self.db.flush().map_err(|e| DBError(e.to_string()))?; } - + Ok(removed) } - + // Expiration fn ttl(&self, key: &str) -> Result { match self.get_storage_value(key)? { @@ -751,40 +804,40 @@ impl StorageBackend for SledStorage { Ok(-1) // Key exists but has no expiration } } - None => Ok(-2) // Key does not exist + None => Ok(-2), // Key does not exist } } - + fn expire_seconds(&self, key: &str, secs: u64) -> Result { let mut storage_val = match self.get_storage_value(key)? { Some(sv) => sv, - None => return Ok(false) + None => return Ok(false), }; - + storage_val.expires_at = Some(Self::now_millis() + (secs as u128) * 1000); self.set_storage_value(key, storage_val)?; self.db.flush().map_err(|e| DBError(e.to_string()))?; Ok(true) } - + fn pexpire_millis(&self, key: &str, ms: u128) -> Result { let mut storage_val = match self.get_storage_value(key)? { Some(sv) => sv, - None => return Ok(false) + None => return Ok(false), }; - + storage_val.expires_at = Some(Self::now_millis() + ms); self.set_storage_value(key, storage_val)?; self.db.flush().map_err(|e| DBError(e.to_string()))?; Ok(true) } - + fn persist(&self, key: &str) -> Result { let mut storage_val = match self.get_storage_value(key)? { Some(sv) => sv, - None => return Ok(false) + None => return Ok(false), }; - + if storage_val.expires_at.is_some() { storage_val.expires_at = None; self.set_storage_value(key, storage_val)?; @@ -794,37 +847,41 @@ impl StorageBackend for SledStorage { Ok(false) } } - + fn expire_at_seconds(&self, key: &str, ts_secs: i64) -> Result { let mut storage_val = match self.get_storage_value(key)? 
{ Some(sv) => sv, - None => return Ok(false) + None => return Ok(false), + }; + + let expires_at_ms: u128 = if ts_secs <= 0 { + 0 + } else { + (ts_secs as u128) * 1000 }; - - let expires_at_ms: u128 = if ts_secs <= 0 { 0 } else { (ts_secs as u128) * 1000 }; storage_val.expires_at = Some(expires_at_ms); self.set_storage_value(key, storage_val)?; self.db.flush().map_err(|e| DBError(e.to_string()))?; Ok(true) } - + fn pexpire_at_millis(&self, key: &str, ts_ms: i64) -> Result { let mut storage_val = match self.get_storage_value(key)? { Some(sv) => sv, - None => return Ok(false) + None => return Ok(false), }; - + let expires_at_ms: u128 = if ts_ms <= 0 { 0 } else { ts_ms as u128 }; storage_val.expires_at = Some(expires_at_ms); self.set_storage_value(key, storage_val)?; self.db.flush().map_err(|e| DBError(e.to_string()))?; Ok(true) } - + fn is_encrypted(&self) -> bool { self.crypto.is_some() } - + fn info(&self) -> Result, DBError> { let dbsize = self.dbsize()?; Ok(vec![ @@ -842,4 +899,4 @@ impl StorageBackend for SledStorage { crypto: self.crypto.clone(), }) } -} \ No newline at end of file +} diff --git a/src/storage_trait.rs b/src/storage_trait.rs index 13fe11e..4e4ef1e 100644 --- a/src/storage_trait.rs +++ b/src/storage_trait.rs @@ -13,11 +13,22 @@ pub trait StorageBackend: Send + Sync { fn dbsize(&self) -> Result; fn flushdb(&self) -> Result<(), DBError>; fn get_key_type(&self, key: &str) -> Result, DBError>; - + // Scanning - fn scan(&self, cursor: u64, pattern: Option<&str>, count: Option) -> Result<(u64, Vec<(String, String)>), DBError>; - fn hscan(&self, key: &str, cursor: u64, pattern: Option<&str>, count: Option) -> Result<(u64, Vec<(String, String)>), DBError>; - + fn scan( + &self, + cursor: u64, + pattern: Option<&str>, + count: Option, + ) -> Result<(u64, Vec<(String, String)>), DBError>; + fn hscan( + &self, + key: &str, + cursor: u64, + pattern: Option<&str>, + count: Option, + ) -> Result<(u64, Vec<(String, String)>), DBError>; + // Hash operations fn hset(&self, key: &str, pairs: Vec<(String, String)>) -> Result; fn hget(&self, key: &str, field: &str) -> Result, DBError>; @@ -29,7 +40,7 @@ pub trait StorageBackend: Send + Sync { fn hlen(&self, key: &str) -> Result; fn hmget(&self, key: &str, fields: Vec) -> Result>, DBError>; fn hsetnx(&self, key: &str, field: &str, value: &str) -> Result; - + // List operations fn lpush(&self, key: &str, elements: Vec) -> Result; fn rpush(&self, key: &str, elements: Vec) -> Result; @@ -40,7 +51,7 @@ pub trait StorageBackend: Send + Sync { fn lrange(&self, key: &str, start: i64, stop: i64) -> Result, DBError>; fn ltrim(&self, key: &str, start: i64, stop: i64) -> Result<(), DBError>; fn lrem(&self, key: &str, count: i64, element: &str) -> Result; - + // Expiration fn ttl(&self, key: &str) -> Result; fn expire_seconds(&self, key: &str, secs: u64) -> Result; @@ -48,11 +59,11 @@ pub trait StorageBackend: Send + Sync { fn persist(&self, key: &str) -> Result; fn expire_at_seconds(&self, key: &str, ts_secs: i64) -> Result; fn pexpire_at_millis(&self, key: &str, ts_ms: i64) -> Result; - + // Metadata fn is_encrypted(&self) -> bool; fn info(&self) -> Result, DBError>; - + // Clone to Arc for sharing fn clone_arc(&self) -> Arc; -} \ No newline at end of file +} diff --git a/src/tantivy_search.rs b/src/tantivy_search.rs new file mode 100644 index 0000000..7615d35 --- /dev/null +++ b/src/tantivy_search.rs @@ -0,0 +1,657 @@ +use crate::error::DBError; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::path::PathBuf; +use 
std::sync::{Arc, RwLock}; +use tantivy::{ + collector::TopDocs, + directory::MmapDirectory, + query::{BooleanQuery, Occur, Query, QueryParser, TermQuery}, + schema::{Field, Schema, TextFieldIndexing, TextOptions, Value, STORED, STRING}, + tokenizer::TokenizerManager, + DateTime, Index, IndexReader, IndexWriter, ReloadPolicy, TantivyDocument, Term, +}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum FieldDef { + Text { + stored: bool, + indexed: bool, + tokenized: bool, + fast: bool, + }, + Numeric { + stored: bool, + indexed: bool, + fast: bool, + precision: NumericType, + }, + Tag { + stored: bool, + separator: String, + case_sensitive: bool, + }, + Geo { + stored: bool, + }, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum NumericType { + I64, + U64, + F64, + Date, +} + +pub struct IndexSchema { + schema: Schema, + fields: HashMap, + default_search_fields: Vec, +} + +pub struct TantivySearch { + index: Index, + writer: Arc>, + reader: IndexReader, + index_schema: IndexSchema, + name: String, + config: IndexConfig, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct IndexConfig { + pub language: String, + pub stopwords: Vec, + pub stemming: bool, + pub max_doc_count: Option, + pub default_score: f64, +} + +impl Default for IndexConfig { + fn default() -> Self { + IndexConfig { + language: "english".to_string(), + stopwords: vec![], + stemming: true, + max_doc_count: None, + default_score: 1.0, + } + } +} + +impl TantivySearch { + pub fn new_with_schema( + base_path: PathBuf, + name: String, + field_definitions: Vec<(String, FieldDef)>, + config: Option, + ) -> Result { + let index_path = base_path.join(&name); + std::fs::create_dir_all(&index_path) + .map_err(|e| DBError(format!("Failed to create index dir: {}", e)))?; + + // Build schema from field definitions + let mut schema_builder = Schema::builder(); + let mut fields = HashMap::new(); + let mut default_search_fields = Vec::new(); + + // Always add a document ID field + let id_field = schema_builder.add_text_field("_id", STRING | STORED); + fields.insert( + "_id".to_string(), + ( + id_field, + FieldDef::Text { + stored: true, + indexed: true, + tokenized: false, + fast: false, + }, + ), + ); + + // Add user-defined fields + for (field_name, field_def) in field_definitions { + let field = match &field_def { + FieldDef::Text { + stored, + indexed, + tokenized, + fast: _fast, + } => { + let mut text_options = TextOptions::default(); + + if *stored { + text_options = text_options.set_stored(); + } + + if *indexed { + let indexing_options = if *tokenized { + TextFieldIndexing::default() + .set_tokenizer("default") + .set_index_option( + tantivy::schema::IndexRecordOption::WithFreqsAndPositions, + ) + } else { + TextFieldIndexing::default() + .set_tokenizer("raw") + .set_index_option(tantivy::schema::IndexRecordOption::Basic) + }; + text_options = text_options.set_indexing_options(indexing_options); + + let f = schema_builder.add_text_field(&field_name, text_options); + if *tokenized { + default_search_fields.push(f); + } + f + } else { + schema_builder.add_text_field(&field_name, text_options) + } + } + FieldDef::Numeric { + stored, + indexed, + fast, + precision, + } => match precision { + NumericType::I64 => { + let mut opts = tantivy::schema::NumericOptions::default(); + if *stored { + opts = opts.set_stored(); + } + if *indexed { + opts = opts.set_indexed(); + } + if *fast { + opts = opts.set_fast(); + } + schema_builder.add_i64_field(&field_name, opts) + } + NumericType::U64 => { + let mut 
opts = tantivy::schema::NumericOptions::default(); + if *stored { + opts = opts.set_stored(); + } + if *indexed { + opts = opts.set_indexed(); + } + if *fast { + opts = opts.set_fast(); + } + schema_builder.add_u64_field(&field_name, opts) + } + NumericType::F64 => { + let mut opts = tantivy::schema::NumericOptions::default(); + if *stored { + opts = opts.set_stored(); + } + if *indexed { + opts = opts.set_indexed(); + } + if *fast { + opts = opts.set_fast(); + } + schema_builder.add_f64_field(&field_name, opts) + } + NumericType::Date => { + let mut opts = tantivy::schema::DateOptions::default(); + if *stored { + opts = opts.set_stored(); + } + if *indexed { + opts = opts.set_indexed(); + } + if *fast { + opts = opts.set_fast(); + } + schema_builder.add_date_field(&field_name, opts) + } + }, + FieldDef::Tag { + stored, + separator: _, + case_sensitive: _, + } => { + let mut text_options = TextOptions::default(); + if *stored { + text_options = text_options.set_stored(); + } + text_options = text_options.set_indexing_options( + TextFieldIndexing::default() + .set_tokenizer("raw") + .set_index_option(tantivy::schema::IndexRecordOption::Basic), + ); + schema_builder.add_text_field(&field_name, text_options) + } + FieldDef::Geo { stored } => { + // For now, store as two f64 fields for lat/lon + let mut opts = tantivy::schema::NumericOptions::default(); + if *stored { + opts = opts.set_stored(); + } + opts = opts.set_indexed().set_fast(); + + let lat_field = + schema_builder.add_f64_field(&format!("{}_lat", field_name), opts.clone()); + let lon_field = + schema_builder.add_f64_field(&format!("{}_lon", field_name), opts); + + fields.insert( + format!("{}_lat", field_name), + ( + lat_field, + FieldDef::Numeric { + stored: *stored, + indexed: true, + fast: true, + precision: NumericType::F64, + }, + ), + ); + fields.insert( + format!("{}_lon", field_name), + ( + lon_field, + FieldDef::Numeric { + stored: *stored, + indexed: true, + fast: true, + precision: NumericType::F64, + }, + ), + ); + continue; // Skip adding the geo field itself + } + }; + + fields.insert(field_name.clone(), (field, field_def)); + } + + let schema = schema_builder.build(); + let index_schema = IndexSchema { + schema: schema.clone(), + fields, + default_search_fields, + }; + + // Create or open index + let dir = MmapDirectory::open(&index_path) + .map_err(|e| DBError(format!("Failed to open index directory: {}", e)))?; + + let mut index = Index::open_or_create(dir, schema) + .map_err(|e| DBError(format!("Failed to create index: {}", e)))?; + + // Configure tokenizers + let tokenizer_manager = TokenizerManager::default(); + index.set_tokenizers(tokenizer_manager); + + let writer = index + .writer(15_000_000) + .map_err(|e| DBError(format!("Failed to create index writer: {}", e)))?; + + let reader = index + .reader_builder() + .reload_policy(ReloadPolicy::OnCommitWithDelay) + .try_into() + .map_err(|e| DBError(format!("Failed to create reader: {}", e)))?; + + let config = config.unwrap_or_default(); + + Ok(TantivySearch { + index, + writer: Arc::new(RwLock::new(writer)), + reader, + index_schema, + name, + config, + }) + } + + pub fn add_document_with_fields( + &self, + doc_id: &str, + fields: HashMap, + ) -> Result<(), DBError> { + let mut writer = self + .writer + .write() + .map_err(|e| DBError(format!("Failed to acquire writer lock: {}", e)))?; + + // Delete existing document with same ID + if let Some((id_field, _)) = self.index_schema.fields.get("_id") { + writer.delete_term(Term::from_field_text(*id_field, doc_id)); + 
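// Annotation (not part of the patch): deleting any existing document that
// carries the same "_id" term before the add below gives
// add_document_with_fields upsert semantics; re-indexing an existing doc_id
// replaces the stored document rather than duplicating it.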
} + + // Create new document + let mut doc = tantivy::doc!(); + + // Add document ID + if let Some((id_field, _)) = self.index_schema.fields.get("_id") { + doc.add_text(*id_field, doc_id); + } + + // Add other fields based on schema + for (field_name, field_value) in fields { + if let Some((field, field_def)) = self.index_schema.fields.get(&field_name) { + match field_def { + FieldDef::Text { .. } => { + doc.add_text(*field, &field_value); + } + FieldDef::Numeric { precision, .. } => match precision { + NumericType::I64 => { + if let Ok(v) = field_value.parse::() { + doc.add_i64(*field, v); + } + } + NumericType::U64 => { + if let Ok(v) = field_value.parse::() { + doc.add_u64(*field, v); + } + } + NumericType::F64 => { + if let Ok(v) = field_value.parse::() { + doc.add_f64(*field, v); + } + } + NumericType::Date => { + if let Ok(v) = field_value.parse::() { + doc.add_date(*field, DateTime::from_timestamp_millis(v)); + } + } + }, + FieldDef::Tag { + separator, + case_sensitive, + .. + } => { + let tags = if !case_sensitive { + field_value.to_lowercase() + } else { + field_value.clone() + }; + + // Store tags as separate terms for efficient filtering + for tag in tags.split(separator.as_str()) { + doc.add_text(*field, tag.trim()); + } + } + FieldDef::Geo { .. } => { + // Parse "lat,lon" format + let parts: Vec<&str> = field_value.split(',').collect(); + if parts.len() == 2 { + if let (Ok(lat), Ok(lon)) = + (parts[0].parse::(), parts[1].parse::()) + { + if let Some((lat_field, _)) = + self.index_schema.fields.get(&format!("{}_lat", field_name)) + { + doc.add_f64(*lat_field, lat); + } + if let Some((lon_field, _)) = + self.index_schema.fields.get(&format!("{}_lon", field_name)) + { + doc.add_f64(*lon_field, lon); + } + } + } + } + } + } + } + + writer + .add_document(doc) + .map_err(|e| DBError(format!("Failed to add document: {}", e)))?; + + writer + .commit() + .map_err(|e| DBError(format!("Failed to commit: {}", e)))?; + + Ok(()) + } + + pub fn search_with_options( + &self, + query_str: &str, + options: SearchOptions, + ) -> Result { + let searcher = self.reader.searcher(); + + // Parse query based on search fields + let query: Box = if self.index_schema.default_search_fields.is_empty() { + return Err(DBError( + "No searchable fields defined in schema".to_string(), + )); + } else { + let query_parser = QueryParser::for_index( + &self.index, + self.index_schema.default_search_fields.clone(), + ); + + Box::new( + query_parser + .parse_query(query_str) + .map_err(|e| DBError(format!("Failed to parse query: {}", e)))?, + ) + }; + + // Apply filters if any + let final_query = if !options.filters.is_empty() { + let mut clauses: Vec<(Occur, Box)> = vec![(Occur::Must, query)]; + + // Add filters + for filter in options.filters { + if let Some((field, _)) = self.index_schema.fields.get(&filter.field) { + match filter.filter_type { + FilterType::Equals(value) => { + let term_query = TermQuery::new( + Term::from_field_text(*field, &value), + tantivy::schema::IndexRecordOption::Basic, + ); + clauses.push((Occur::Must, Box::new(term_query))); + } + FilterType::Range { min: _, max: _ } => { + // Would need numeric field handling here + // Simplified for now + } + FilterType::InSet(values) => { + let mut sub_clauses: Vec<(Occur, Box)> = vec![]; + for value in values { + let term_query = TermQuery::new( + Term::from_field_text(*field, &value), + tantivy::schema::IndexRecordOption::Basic, + ); + sub_clauses.push((Occur::Should, Box::new(term_query))); + } + clauses.push((Occur::Must, 
Box::new(BooleanQuery::new(sub_clauses)))); + } + } + } + } + + Box::new(BooleanQuery::new(clauses)) + } else { + query + }; + + // Execute search + let top_docs = searcher + .search( + &*final_query, + &TopDocs::with_limit(options.limit + options.offset), + ) + .map_err(|e| DBError(format!("Search failed: {}", e)))?; + + let total_hits = top_docs.len(); + let mut documents = Vec::new(); + + for (score, doc_address) in top_docs.iter().skip(options.offset).take(options.limit) { + let retrieved_doc: TantivyDocument = searcher + .doc(*doc_address) + .map_err(|e| DBError(format!("Failed to retrieve doc: {}", e)))?; + + let mut doc_fields = HashMap::new(); + + // Extract all stored fields + for (field_name, (field, field_def)) in &self.index_schema.fields { + match field_def { + FieldDef::Text { stored, .. } | FieldDef::Tag { stored, .. } => { + if *stored { + if let Some(value) = retrieved_doc.get_first(*field) { + if let Some(text) = value.as_str() { + doc_fields.insert(field_name.clone(), text.to_string()); + } + } + } + } + FieldDef::Numeric { + stored, precision, .. + } => { + if *stored { + let value_str = match precision { + NumericType::I64 => retrieved_doc + .get_first(*field) + .and_then(|v| v.as_i64()) + .map(|v| v.to_string()), + NumericType::U64 => retrieved_doc + .get_first(*field) + .and_then(|v| v.as_u64()) + .map(|v| v.to_string()), + NumericType::F64 => retrieved_doc + .get_first(*field) + .and_then(|v| v.as_f64()) + .map(|v| v.to_string()), + NumericType::Date => retrieved_doc + .get_first(*field) + .and_then(|v| v.as_datetime()) + .map(|v| v.into_timestamp_millis().to_string()), + }; + + if let Some(v) = value_str { + doc_fields.insert(field_name.clone(), v); + } + } + } + FieldDef::Geo { stored } => { + if *stored { + let lat_field = self + .index_schema + .fields + .get(&format!("{}_lat", field_name)) + .unwrap() + .0; + let lon_field = self + .index_schema + .fields + .get(&format!("{}_lon", field_name)) + .unwrap() + .0; + + let lat = retrieved_doc.get_first(lat_field).and_then(|v| v.as_f64()); + let lon = retrieved_doc.get_first(lon_field).and_then(|v| v.as_f64()); + + if let (Some(lat), Some(lon)) = (lat, lon) { + doc_fields.insert(field_name.clone(), format!("{},{}", lat, lon)); + } + } + } + } + } + + documents.push(SearchDocument { + fields: doc_fields, + score: *score, + }); + } + + Ok(SearchResults { + total: total_hits, + documents, + }) + } + + pub fn get_info(&self) -> Result { + let searcher = self.reader.searcher(); + let num_docs = searcher.num_docs(); + + let fields_info: Vec = self + .index_schema + .fields + .iter() + .map(|(name, (_, def))| FieldInfo { + name: name.clone(), + field_type: format!("{:?}", def), + }) + .collect(); + + Ok(IndexInfo { + name: self.name.clone(), + num_docs, + fields: fields_info, + config: self.config.clone(), + }) + } +} + +#[derive(Debug, Clone)] +pub struct SearchOptions { + pub limit: usize, + pub offset: usize, + pub filters: Vec, + pub sort_by: Option, + pub return_fields: Option>, + pub highlight: bool, +} + +impl Default for SearchOptions { + fn default() -> Self { + SearchOptions { + limit: 10, + offset: 0, + filters: vec![], + sort_by: None, + return_fields: None, + highlight: false, + } + } +} + +#[derive(Debug, Clone)] +pub struct Filter { + pub field: String, + pub filter_type: FilterType, +} + +#[derive(Debug, Clone)] +pub enum FilterType { + Equals(String), + Range { min: String, max: String }, + InSet(Vec), +} + +#[derive(Debug)] +pub struct SearchResults { + pub total: usize, + pub documents: Vec, +} + 
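The API assembled in this file is easier to follow next to a usage sketch. The example below is illustrative only: the index directory, index name and field names are invented, and the generic parameters elided in this rendering of the patch are assumed to be Result<_, DBError> and HashMap<String, String>. It simply drives new_with_schema, add_document_with_fields and search_with_options as they are defined above.

// Illustrative usage sketch, not part of the patch. TantivySearch, FieldDef,
// NumericType, SearchOptions, Filter and FilterType are the items defined in
// this file; the path, index name and field names are invented.
use std::collections::HashMap;
use std::path::PathBuf;

fn demo_search() -> Result<(), DBError> {
    let search = TantivySearch::new_with_schema(
        PathBuf::from("/tmp/herodb_indexes"),
        "articles".to_string(),
        vec![
            ("title".to_string(), FieldDef::Text { stored: true, indexed: true, tokenized: true, fast: false }),
            ("year".to_string(), FieldDef::Numeric { stored: true, indexed: true, fast: true, precision: NumericType::U64 }),
            ("tags".to_string(), FieldDef::Tag { stored: true, separator: ",".to_string(), case_sensitive: false }),
        ],
        None, // fall back to IndexConfig::default()
    )?;

    // All values arrive as strings; numeric fields are parsed before indexing.
    let mut fields = HashMap::new();
    fields.insert("title".to_string(), "Hello Tantivy".to_string());
    fields.insert("year".to_string(), "2024".to_string());
    fields.insert("tags".to_string(), "rust,search".to_string());
    search.add_document_with_fields("doc-1", fields)?;

    // Full-text query over the tokenized text fields, narrowed by a tag filter.
    let results = search.search_with_options(
        "hello",
        SearchOptions {
            limit: 10,
            filters: vec![Filter {
                field: "tags".to_string(),
                filter_type: FilterType::Equals("rust".to_string()),
            }],
            ..SearchOptions::default()
        },
    )?;
    println!("{} hit(s)", results.total);
    Ok(())
}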
+#[derive(Debug)] +pub struct SearchDocument { + pub fields: HashMap, + pub score: f32, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct IndexInfo { + pub name: String, + pub num_docs: u64, + pub fields: Vec, + pub config: IndexConfig, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct FieldInfo { + pub name: String, + pub field_type: String, +} diff --git a/tests/debug_hset.rs b/tests/debug_hset.rs index 7930be8..b921d09 100644 --- a/tests/debug_hset.rs +++ b/tests/debug_hset.rs @@ -1,4 +1,4 @@ -use herodb::{server::Server, options::DBOption}; +use herodb::{options::DBOption, server::Server}; use std::time::Duration; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::TcpStream; @@ -7,7 +7,7 @@ use tokio::time::sleep; // Helper function to send command and get response async fn send_command(stream: &mut TcpStream, command: &str) -> String { stream.write_all(command.as_bytes()).await.unwrap(); - + let mut buffer = [0; 1024]; let n = stream.read(&mut buffer).await.unwrap(); String::from_utf8_lossy(&buffer[..n]).to_string() @@ -19,7 +19,7 @@ async fn debug_hset_simple() { let test_dir = "/tmp/herodb_debug_hset"; let _ = std::fs::remove_dir_all(test_dir); std::fs::create_dir_all(test_dir).unwrap(); - + let port = 16500; let option = DBOption { dir: test_dir.to_string(), @@ -29,35 +29,49 @@ async fn debug_hset_simple() { encryption_key: None, backend: herodb::options::BackendType::Redb, }; - + let mut server = Server::new(option).await; - + // Start server in background tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let _ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(200)).await; - - let mut stream = TcpStream::connect(format!("127.0.0.1:{}", port)).await.unwrap(); - + + let mut stream = TcpStream::connect(format!("127.0.0.1:{}", port)) + .await + .unwrap(); + // Test simple HSET println!("Testing HSET..."); - let response = send_command(&mut stream, "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n$6\r\nvalue1\r\n").await; + let response = send_command( + &mut stream, + "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n$6\r\nvalue1\r\n", + ) + .await; println!("HSET response: {}", response); assert!(response.contains("1"), "Expected '1' but got: {}", response); - + // Test HGET println!("Testing HGET..."); - let response = send_command(&mut stream, "*3\r\n$4\r\nHGET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n").await; + let response = send_command( + &mut stream, + "*3\r\n$4\r\nHGET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n", + ) + .await; println!("HGET response: {}", response); - assert!(response.contains("value1"), "Expected 'value1' but got: {}", response); -} \ No newline at end of file + assert!( + response.contains("value1"), + "Expected 'value1' but got: {}", + response + ); +} diff --git a/tests/debug_hset_simple.rs b/tests/debug_hset_simple.rs index 356e704..621b962 100644 --- a/tests/debug_hset_simple.rs +++ b/tests/debug_hset_simple.rs @@ -1,4 +1,4 @@ -use herodb::{server::Server, options::DBOption}; +use herodb::{options::DBOption, server::Server}; use std::time::Duration; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::TcpStream; @@ -7,11 +7,11 @@ use tokio::time::sleep; #[tokio::test] async fn debug_hset_return_value() { let test_dir = "/tmp/herodb_debug_hset_return"; - + // Clean up any existing test data let _ = std::fs::remove_dir_all(&test_dir); std::fs::create_dir_all(&test_dir).unwrap(); 
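The raw strings sent by these tests are RESP (Redis Serialization Protocol) frames: "*N" announces an array of N bulk strings, and each "$len" introduces a payload of exactly len bytes, with every element terminated by "\r\n". A small hypothetical helper, not part of this patch (the tests keep the literals inline), shows how such a frame is assembled.

// Hypothetical helper, not part of this patch: encodes a command as a RESP
// array of bulk strings, the shape of the raw literals used in these tests.
fn resp_command(args: &[&str]) -> String {
    let mut frame = format!("*{}\r\n", args.len());
    for arg in args {
        frame.push_str(&format!("${}\r\n{}\r\n", arg.len(), arg));
    }
    frame
}

// resp_command(&["HSET", "hash", "field1", "value1"]) yields
// "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n$6\r\nvalue1\r\n",
// the same frame sent by debug_hset_simple above.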
- + let option = DBOption { dir: test_dir.to_string(), port: 16390, @@ -20,38 +20,42 @@ async fn debug_hset_return_value() { encryption_key: None, backend: herodb::options::BackendType::Redb, }; - + let mut server = Server::new(option).await; - + // Start server in background tokio::spawn(async move { let listener = tokio::net::TcpListener::bind("127.0.0.1:16390") .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let _ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(200)).await; - + // Connect and test HSET let mut stream = TcpStream::connect("127.0.0.1:16390").await.unwrap(); - + // Send HSET command let cmd = "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n$6\r\nvalue1\r\n"; stream.write_all(cmd.as_bytes()).await.unwrap(); - + let mut buffer = [0; 1024]; let n = stream.read(&mut buffer).await.unwrap(); let response = String::from_utf8_lossy(&buffer[..n]); - + println!("HSET response: {}", response); println!("Response bytes: {:?}", &buffer[..n]); - + // Check if response contains "1" - assert!(response.contains("1"), "Expected response to contain '1', got: {}", response); -} \ No newline at end of file + assert!( + response.contains("1"), + "Expected response to contain '1', got: {}", + response + ); +} diff --git a/tests/debug_protocol.rs b/tests/debug_protocol.rs index 8df61e7..0e9e305 100644 --- a/tests/debug_protocol.rs +++ b/tests/debug_protocol.rs @@ -1,12 +1,15 @@ -use herodb::protocol::Protocol; use herodb::cmd::Cmd; +use herodb::protocol::Protocol; #[test] fn test_protocol_parsing() { // Test TYPE command parsing let type_cmd = "*2\r\n$4\r\nTYPE\r\n$7\r\nnoexist\r\n"; - println!("Parsing TYPE command: {}", type_cmd.replace("\r\n", "\\r\\n")); - + println!( + "Parsing TYPE command: {}", + type_cmd.replace("\r\n", "\\r\\n") + ); + match Protocol::from(type_cmd) { Ok((protocol, _)) => { println!("Protocol parsed successfully: {:?}", protocol); @@ -17,11 +20,14 @@ fn test_protocol_parsing() { } Err(e) => println!("Protocol parsing failed: {:?}", e), } - + // Test HEXISTS command parsing let hexists_cmd = "*3\r\n$7\r\nHEXISTS\r\n$4\r\nhash\r\n$7\r\nnoexist\r\n"; - println!("\nParsing HEXISTS command: {}", hexists_cmd.replace("\r\n", "\\r\\n")); - + println!( + "\nParsing HEXISTS command: {}", + hexists_cmd.replace("\r\n", "\\r\\n") + ); + match Protocol::from(hexists_cmd) { Ok((protocol, _)) => { println!("Protocol parsed successfully: {:?}", protocol); @@ -32,4 +38,4 @@ fn test_protocol_parsing() { } Err(e) => println!("Protocol parsing failed: {:?}", e), } -} \ No newline at end of file +} diff --git a/tests/redis_integration_tests.rs b/tests/redis_integration_tests.rs index 47033e1..a647551 100644 --- a/tests/redis_integration_tests.rs +++ b/tests/redis_integration_tests.rs @@ -81,13 +81,13 @@ fn setup_server() -> (ServerProcessGuard, u16) { ]) .spawn() .expect("Failed to start server process"); - + // Create a new guard that also owns the test directory path let guard = ServerProcessGuard { process: child, test_dir, }; - + // Give the server time to build and start (cargo run may compile first) std::thread::sleep(Duration::from_millis(2500)); @@ -206,7 +206,9 @@ async fn test_expiration(conn: &mut Connection) { async fn test_scan_operations(conn: &mut Connection) { cleanup_keys(conn).await; for i in 0..5 { - let _: () = conn.set(format!("key{}", i), format!("value{}", i)).unwrap(); + let _: () = conn + .set(format!("key{}", i), format!("value{}", i)) + .unwrap(); } let result: (u64, Vec) = redis::cmd("SCAN") .arg(0) @@ 
-253,7 +255,9 @@ async fn test_scan_with_count(conn: &mut Connection) { async fn test_hscan_operations(conn: &mut Connection) { cleanup_keys(conn).await; for i in 0..3 { - let _: () = conn.hset("testhash", format!("field{}", i), format!("value{}", i)).unwrap(); + let _: () = conn + .hset("testhash", format!("field{}", i), format!("value{}", i)) + .unwrap(); } let result: (u64, Vec) = redis::cmd("HSCAN") .arg("testhash") @@ -273,8 +277,16 @@ async fn test_hscan_operations(conn: &mut Connection) { async fn test_transaction_operations(conn: &mut Connection) { cleanup_keys(conn).await; let _: () = redis::cmd("MULTI").query(conn).unwrap(); - let _: () = redis::cmd("SET").arg("key1").arg("value1").query(conn).unwrap(); - let _: () = redis::cmd("SET").arg("key2").arg("value2").query(conn).unwrap(); + let _: () = redis::cmd("SET") + .arg("key1") + .arg("value1") + .query(conn) + .unwrap(); + let _: () = redis::cmd("SET") + .arg("key2") + .arg("value2") + .query(conn) + .unwrap(); let _: Vec = redis::cmd("EXEC").query(conn).unwrap(); let result: String = conn.get("key1").unwrap(); assert_eq!(result, "value1"); @@ -286,7 +298,11 @@ async fn test_transaction_operations(conn: &mut Connection) { async fn test_discard_transaction(conn: &mut Connection) { cleanup_keys(conn).await; let _: () = redis::cmd("MULTI").query(conn).unwrap(); - let _: () = redis::cmd("SET").arg("discard").arg("value").query(conn).unwrap(); + let _: () = redis::cmd("SET") + .arg("discard") + .arg("value") + .query(conn) + .unwrap(); let _: () = redis::cmd("DISCARD").query(conn).unwrap(); let result: Option = conn.get("discard").unwrap(); assert_eq!(result, None); @@ -306,7 +322,6 @@ async fn test_type_command(conn: &mut Connection) { cleanup_keys(conn).await; } - async fn test_info_command(conn: &mut Connection) { cleanup_keys(conn).await; let result: String = redis::cmd("INFO").query(conn).unwrap(); diff --git a/tests/redis_tests.rs b/tests/redis_tests.rs index f6e8a13..589577c 100644 --- a/tests/redis_tests.rs +++ b/tests/redis_tests.rs @@ -1,4 +1,4 @@ -use herodb::{server::Server, options::DBOption}; +use herodb::{options::DBOption, server::Server}; use std::time::Duration; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::TcpStream; @@ -8,14 +8,14 @@ use tokio::time::sleep; async fn start_test_server(test_name: &str) -> (Server, u16) { use std::sync::atomic::{AtomicU16, Ordering}; static PORT_COUNTER: AtomicU16 = AtomicU16::new(16379); - + let port = PORT_COUNTER.fetch_add(1, Ordering::SeqCst); let test_dir = format!("/tmp/herodb_test_{}", test_name); - + // Clean up and create test directory let _ = std::fs::remove_dir_all(&test_dir); std::fs::create_dir_all(&test_dir).unwrap(); - + let option = DBOption { dir: test_dir, port, @@ -24,7 +24,7 @@ async fn start_test_server(test_name: &str) -> (Server, u16) { encryption_key: None, backend: herodb::options::BackendType::Redb, }; - + let server = Server::new(option).await; (server, port) } @@ -47,7 +47,7 @@ async fn connect_to_server(port: u16) -> TcpStream { // Helper function to send command and get response async fn send_command(stream: &mut TcpStream, command: &str) -> String { stream.write_all(command.as_bytes()).await.unwrap(); - + let mut buffer = [0; 1024]; let n = stream.read(&mut buffer).await.unwrap(); String::from_utf8_lossy(&buffer[..n]).to_string() @@ -56,22 +56,22 @@ async fn send_command(stream: &mut TcpStream, command: &str) -> String { #[tokio::test] async fn test_basic_ping() { let (mut server, port) = start_test_server("ping").await; - + // Start 
server in background tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let _ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(100)).await; - + let mut stream = connect_to_server(port).await; let response = send_command(&mut stream, "*1\r\n$4\r\nPING\r\n").await; assert!(response.contains("PONG")); @@ -80,40 +80,44 @@ async fn test_basic_ping() { #[tokio::test] async fn test_string_operations() { let (mut server, port) = start_test_server("string").await; - + // Start server in background tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let _ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(100)).await; - + let mut stream = connect_to_server(port).await; - + // Test SET - let response = send_command(&mut stream, "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n").await; + let response = send_command( + &mut stream, + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n", + ) + .await; assert!(response.contains("OK")); - + // Test GET let response = send_command(&mut stream, "*2\r\n$3\r\nGET\r\n$3\r\nkey\r\n").await; assert!(response.contains("value")); - + // Test GET non-existent key let response = send_command(&mut stream, "*2\r\n$3\r\nGET\r\n$7\r\nnoexist\r\n").await; assert!(response.contains("$-1")); // NULL response - + // Test DEL let response = send_command(&mut stream, "*2\r\n$3\r\nDEL\r\n$3\r\nkey\r\n").await; assert!(response.contains("1")); - + // Test GET after DEL let response = send_command(&mut stream, "*2\r\n$3\r\nGET\r\n$3\r\nkey\r\n").await; assert!(response.contains("$-1")); // NULL response @@ -122,33 +126,37 @@ async fn test_string_operations() { #[tokio::test] async fn test_incr_operations() { let (mut server, port) = start_test_server("incr").await; - + tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let _ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(100)).await; - + let mut stream = connect_to_server(port).await; - + // Test INCR on non-existent key let response = send_command(&mut stream, "*2\r\n$4\r\nINCR\r\n$7\r\ncounter\r\n").await; assert!(response.contains("1")); - + // Test INCR on existing key let response = send_command(&mut stream, "*2\r\n$4\r\nINCR\r\n$7\r\ncounter\r\n").await; assert!(response.contains("2")); - + // Test INCR on string value (should fail) - send_command(&mut stream, "*3\r\n$3\r\nSET\r\n$6\r\nstring\r\n$5\r\nhello\r\n").await; + send_command( + &mut stream, + "*3\r\n$3\r\nSET\r\n$6\r\nstring\r\n$5\r\nhello\r\n", + ) + .await; let response = send_command(&mut stream, "*2\r\n$4\r\nINCR\r\n$6\r\nstring\r\n").await; assert!(response.contains("ERR")); } @@ -156,63 +164,83 @@ async fn test_incr_operations() { #[tokio::test] async fn test_hash_operations() { let (mut server, port) = start_test_server("hash").await; - + tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let _ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(100)).await; - + let mut stream = connect_to_server(port).await; - + // Test HSET - let response = send_command(&mut 
stream, "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n$6\r\nvalue1\r\n").await; + let response = send_command( + &mut stream, + "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n$6\r\nvalue1\r\n", + ) + .await; assert!(response.contains("1")); // 1 new field - + // Test HGET - let response = send_command(&mut stream, "*3\r\n$4\r\nHGET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n").await; + let response = send_command( + &mut stream, + "*3\r\n$4\r\nHGET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n", + ) + .await; assert!(response.contains("value1")); - + // Test HSET multiple fields let response = send_command(&mut stream, "*6\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield2\r\n$6\r\nvalue2\r\n$6\r\nfield3\r\n$6\r\nvalue3\r\n").await; assert!(response.contains("2")); // 2 new fields - + // Test HGETALL let response = send_command(&mut stream, "*2\r\n$7\r\nHGETALL\r\n$4\r\nhash\r\n").await; assert!(response.contains("field1")); assert!(response.contains("value1")); assert!(response.contains("field2")); assert!(response.contains("value2")); - + // Test HLEN let response = send_command(&mut stream, "*2\r\n$4\r\nHLEN\r\n$4\r\nhash\r\n").await; assert!(response.contains("3")); - + // Test HEXISTS - let response = send_command(&mut stream, "*3\r\n$7\r\nHEXISTS\r\n$4\r\nhash\r\n$6\r\nfield1\r\n").await; + let response = send_command( + &mut stream, + "*3\r\n$7\r\nHEXISTS\r\n$4\r\nhash\r\n$6\r\nfield1\r\n", + ) + .await; assert!(response.contains("1")); - - let response = send_command(&mut stream, "*3\r\n$7\r\nHEXISTS\r\n$4\r\nhash\r\n$7\r\nnoexist\r\n").await; + + let response = send_command( + &mut stream, + "*3\r\n$7\r\nHEXISTS\r\n$4\r\nhash\r\n$7\r\nnoexist\r\n", + ) + .await; assert!(response.contains("0")); - + // Test HDEL - let response = send_command(&mut stream, "*3\r\n$4\r\nHDEL\r\n$4\r\nhash\r\n$6\r\nfield1\r\n").await; + let response = send_command( + &mut stream, + "*3\r\n$4\r\nHDEL\r\n$4\r\nhash\r\n$6\r\nfield1\r\n", + ) + .await; assert!(response.contains("1")); - + // Test HKEYS let response = send_command(&mut stream, "*2\r\n$5\r\nHKEYS\r\n$4\r\nhash\r\n").await; assert!(response.contains("field2")); assert!(response.contains("field3")); assert!(!response.contains("field1")); // Should be deleted - + // Test HVALS let response = send_command(&mut stream, "*2\r\n$5\r\nHVALS\r\n$4\r\nhash\r\n").await; assert!(response.contains("value2")); @@ -222,46 +250,50 @@ async fn test_hash_operations() { #[tokio::test] async fn test_expiration() { let (mut server, port) = start_test_server("expiration").await; - + tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let _ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(100)).await; - + let mut stream = connect_to_server(port).await; - + // Test SETEX (expire in 1 second) - let response = send_command(&mut stream, "*5\r\n$3\r\nSET\r\n$6\r\nexpkey\r\n$5\r\nvalue\r\n$2\r\nEX\r\n$1\r\n1\r\n").await; + let response = send_command( + &mut stream, + "*5\r\n$3\r\nSET\r\n$6\r\nexpkey\r\n$5\r\nvalue\r\n$2\r\nEX\r\n$1\r\n1\r\n", + ) + .await; assert!(response.contains("OK")); - + // Test TTL let response = send_command(&mut stream, "*2\r\n$3\r\nTTL\r\n$6\r\nexpkey\r\n").await; assert!(response.contains("1") || response.contains("0")); // Should be 1 or 0 seconds - + // Test EXISTS let response = send_command(&mut stream, "*2\r\n$6\r\nEXISTS\r\n$6\r\nexpkey\r\n").await; assert!(response.contains("1")); - + // Wait for 
expiration sleep(Duration::from_millis(1100)).await; - + // Test GET after expiration let response = send_command(&mut stream, "*2\r\n$3\r\nGET\r\n$6\r\nexpkey\r\n").await; assert!(response.contains("$-1")); // Should be NULL - + // Test TTL after expiration let response = send_command(&mut stream, "*2\r\n$3\r\nTTL\r\n$6\r\nexpkey\r\n").await; assert!(response.contains("-2")); // Key doesn't exist - + // Test EXISTS after expiration let response = send_command(&mut stream, "*2\r\n$6\r\nEXISTS\r\n$6\r\nexpkey\r\n").await; assert!(response.contains("0")); @@ -270,33 +302,37 @@ async fn test_expiration() { #[tokio::test] async fn test_scan_operations() { let (mut server, port) = start_test_server("scan").await; - + tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let _ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(100)).await; - + let mut stream = connect_to_server(port).await; - + // Set up test data for i in 0..5 { let cmd = format!("*3\r\n$3\r\nSET\r\n$4\r\nkey{}\r\n$6\r\nvalue{}\r\n", i, i); send_command(&mut stream, &cmd).await; } - + // Test SCAN - let response = send_command(&mut stream, "*6\r\n$4\r\nSCAN\r\n$1\r\n0\r\n$5\r\nMATCH\r\n$1\r\n*\r\n$5\r\nCOUNT\r\n$2\r\n10\r\n").await; + let response = send_command( + &mut stream, + "*6\r\n$4\r\nSCAN\r\n$1\r\n0\r\n$5\r\nMATCH\r\n$1\r\n*\r\n$5\r\nCOUNT\r\n$2\r\n10\r\n", + ) + .await; assert!(response.contains("key")); - + // Test KEYS let response = send_command(&mut stream, "*2\r\n$4\r\nKEYS\r\n$1\r\n*\r\n").await; assert!(response.contains("key0")); @@ -306,29 +342,32 @@ async fn test_scan_operations() { #[tokio::test] async fn test_hscan_operations() { let (mut server, port) = start_test_server("hscan").await; - + tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let _ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(100)).await; - + let mut stream = connect_to_server(port).await; - + // Set up hash data for i in 0..3 { - let cmd = format!("*4\r\n$4\r\nHSET\r\n$8\r\ntesthash\r\n$6\r\nfield{}\r\n$6\r\nvalue{}\r\n", i, i); + let cmd = format!( + "*4\r\n$4\r\nHSET\r\n$8\r\ntesthash\r\n$6\r\nfield{}\r\n$6\r\nvalue{}\r\n", + i, i + ); send_command(&mut stream, &cmd).await; } - + // Test HSCAN let response = send_command(&mut stream, "*7\r\n$5\r\nHSCAN\r\n$8\r\ntesthash\r\n$1\r\n0\r\n$5\r\nMATCH\r\n$1\r\n*\r\n$5\r\nCOUNT\r\n$2\r\n10\r\n").await; assert!(response.contains("field")); @@ -338,42 +377,50 @@ async fn test_hscan_operations() { #[tokio::test] async fn test_transaction_operations() { let (mut server, port) = start_test_server("transaction").await; - + tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let _ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(100)).await; - + let mut stream = connect_to_server(port).await; - + // Test MULTI let response = send_command(&mut stream, "*1\r\n$5\r\nMULTI\r\n").await; assert!(response.contains("OK")); - + // Test queued commands - let response = send_command(&mut stream, "*3\r\n$3\r\nSET\r\n$4\r\nkey1\r\n$6\r\nvalue1\r\n").await; + let response = send_command( + &mut stream, + "*3\r\n$3\r\nSET\r\n$4\r\nkey1\r\n$6\r\nvalue1\r\n", + ) + 
.await; assert!(response.contains("QUEUED")); - - let response = send_command(&mut stream, "*3\r\n$3\r\nSET\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n").await; + + let response = send_command( + &mut stream, + "*3\r\n$3\r\nSET\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n", + ) + .await; assert!(response.contains("QUEUED")); - + // Test EXEC let response = send_command(&mut stream, "*1\r\n$4\r\nEXEC\r\n").await; assert!(response.contains("OK")); // Should contain results of executed commands - + // Verify commands were executed let response = send_command(&mut stream, "*2\r\n$3\r\nGET\r\n$4\r\nkey1\r\n").await; assert!(response.contains("value1")); - + let response = send_command(&mut stream, "*2\r\n$3\r\nGET\r\n$4\r\nkey2\r\n").await; assert!(response.contains("value2")); } @@ -381,35 +428,39 @@ async fn test_transaction_operations() { #[tokio::test] async fn test_discard_transaction() { let (mut server, port) = start_test_server("discard").await; - + tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let _ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(100)).await; - + let mut stream = connect_to_server(port).await; - + // Test MULTI let response = send_command(&mut stream, "*1\r\n$5\r\nMULTI\r\n").await; assert!(response.contains("OK")); - + // Test queued command - let response = send_command(&mut stream, "*3\r\n$3\r\nSET\r\n$7\r\ndiscard\r\n$5\r\nvalue\r\n").await; + let response = send_command( + &mut stream, + "*3\r\n$3\r\nSET\r\n$7\r\ndiscard\r\n$5\r\nvalue\r\n", + ) + .await; assert!(response.contains("QUEUED")); - + // Test DISCARD let response = send_command(&mut stream, "*1\r\n$7\r\nDISCARD\r\n").await; assert!(response.contains("OK")); - + // Verify command was not executed let response = send_command(&mut stream, "*2\r\n$3\r\nGET\r\n$7\r\ndiscard\r\n").await; assert!(response.contains("$-1")); // Should be NULL @@ -418,33 +469,41 @@ async fn test_discard_transaction() { #[tokio::test] async fn test_type_command() { let (mut server, port) = start_test_server("type").await; - + tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let _ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(100)).await; - + let mut stream = connect_to_server(port).await; - + // Test string type - send_command(&mut stream, "*3\r\n$3\r\nSET\r\n$6\r\nstring\r\n$5\r\nvalue\r\n").await; + send_command( + &mut stream, + "*3\r\n$3\r\nSET\r\n$6\r\nstring\r\n$5\r\nvalue\r\n", + ) + .await; let response = send_command(&mut stream, "*2\r\n$4\r\nTYPE\r\n$6\r\nstring\r\n").await; assert!(response.contains("string")); - + // Test hash type - send_command(&mut stream, "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$5\r\nfield\r\n$5\r\nvalue\r\n").await; + send_command( + &mut stream, + "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$5\r\nfield\r\n$5\r\nvalue\r\n", + ) + .await; let response = send_command(&mut stream, "*2\r\n$4\r\nTYPE\r\n$4\r\nhash\r\n").await; assert!(response.contains("hash")); - + // Test non-existent key let response = send_command(&mut stream, "*2\r\n$4\r\nTYPE\r\n$7\r\nnoexist\r\n").await; assert!(response.contains("none")); @@ -453,30 +512,38 @@ async fn test_type_command() { #[tokio::test] async fn test_config_commands() { let (mut server, port) = start_test_server("config").await; - + tokio::spawn(async move { let listener = 
tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let _ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(100)).await; - + let mut stream = connect_to_server(port).await; - + // Test CONFIG GET databases - let response = send_command(&mut stream, "*3\r\n$6\r\nCONFIG\r\n$3\r\nGET\r\n$9\r\ndatabases\r\n").await; + let response = send_command( + &mut stream, + "*3\r\n$6\r\nCONFIG\r\n$3\r\nGET\r\n$9\r\ndatabases\r\n", + ) + .await; assert!(response.contains("databases")); assert!(response.contains("16")); - + // Test CONFIG GET dir - let response = send_command(&mut stream, "*3\r\n$6\r\nCONFIG\r\n$3\r\nGET\r\n$3\r\ndir\r\n").await; + let response = send_command( + &mut stream, + "*3\r\n$6\r\nCONFIG\r\n$3\r\nGET\r\n$3\r\ndir\r\n", + ) + .await; assert!(response.contains("dir")); assert!(response.contains("/tmp/herodb_test_config")); } @@ -484,27 +551,27 @@ async fn test_config_commands() { #[tokio::test] async fn test_info_command() { let (mut server, port) = start_test_server("info").await; - + tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let _ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(100)).await; - + let mut stream = connect_to_server(port).await; - + // Test INFO let response = send_command(&mut stream, "*1\r\n$4\r\nINFO\r\n").await; assert!(response.contains("redis_version")); - + // Test INFO replication let response = send_command(&mut stream, "*2\r\n$4\r\nINFO\r\n$11\r\nreplication\r\n").await; assert!(response.contains("role:master")); @@ -513,36 +580,44 @@ async fn test_info_command() { #[tokio::test] async fn test_error_handling() { let (mut server, port) = start_test_server("error").await; - + tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let _ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(100)).await; - + let mut stream = connect_to_server(port).await; - + // Test WRONGTYPE error - try to use hash command on string - send_command(&mut stream, "*3\r\n$3\r\nSET\r\n$6\r\nstring\r\n$5\r\nvalue\r\n").await; - let response = send_command(&mut stream, "*3\r\n$4\r\nHGET\r\n$6\r\nstring\r\n$5\r\nfield\r\n").await; + send_command( + &mut stream, + "*3\r\n$3\r\nSET\r\n$6\r\nstring\r\n$5\r\nvalue\r\n", + ) + .await; + let response = send_command( + &mut stream, + "*3\r\n$4\r\nHGET\r\n$6\r\nstring\r\n$5\r\nfield\r\n", + ) + .await; assert!(response.contains("WRONGTYPE")); - + // Test unknown command let response = send_command(&mut stream, "*1\r\n$7\r\nUNKNOWN\r\n").await; assert!(response.contains("unknown cmd") || response.contains("ERR")); - + // Test EXEC without MULTI let response = send_command(&mut stream, "*1\r\n$4\r\nEXEC\r\n").await; assert!(response.contains("ERR")); - + // Test DISCARD without MULTI let response = send_command(&mut stream, "*1\r\n$7\r\nDISCARD\r\n").await; assert!(response.contains("ERR")); @@ -551,29 +626,37 @@ async fn test_error_handling() { #[tokio::test] async fn test_list_operations() { let (mut server, port) = start_test_server("list").await; - + tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let 
_ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(100)).await; - + let mut stream = connect_to_server(port).await; - + // Test LPUSH - let response = send_command(&mut stream, "*4\r\n$5\r\nLPUSH\r\n$4\r\nlist\r\n$1\r\na\r\n$1\r\nb\r\n").await; + let response = send_command( + &mut stream, + "*4\r\n$5\r\nLPUSH\r\n$4\r\nlist\r\n$1\r\na\r\n$1\r\nb\r\n", + ) + .await; assert!(response.contains("2")); // 2 elements - + // Test RPUSH - let response = send_command(&mut stream, "*4\r\n$5\r\nRPUSH\r\n$4\r\nlist\r\n$1\r\nc\r\n$1\r\nd\r\n").await; + let response = send_command( + &mut stream, + "*4\r\n$5\r\nRPUSH\r\n$4\r\nlist\r\n$1\r\nc\r\n$1\r\nd\r\n", + ) + .await; assert!(response.contains("4")); // 4 elements // Test LLEN @@ -581,29 +664,52 @@ async fn test_list_operations() { assert!(response.contains("4")); // Test LRANGE - let response = send_command(&mut stream, "*4\r\n$6\r\nLRANGE\r\n$4\r\nlist\r\n$1\r\n0\r\n$2\r\n-1\r\n").await; - assert_eq!(response, "*4\r\n$1\r\nb\r\n$1\r\na\r\n$1\r\nc\r\n$1\r\nd\r\n"); - + let response = send_command( + &mut stream, + "*4\r\n$6\r\nLRANGE\r\n$4\r\nlist\r\n$1\r\n0\r\n$2\r\n-1\r\n", + ) + .await; + assert_eq!( + response, + "*4\r\n$1\r\nb\r\n$1\r\na\r\n$1\r\nc\r\n$1\r\nd\r\n" + ); + // Test LINDEX - let response = send_command(&mut stream, "*3\r\n$6\r\nLINDEX\r\n$4\r\nlist\r\n$1\r\n0\r\n").await; + let response = send_command( + &mut stream, + "*3\r\n$6\r\nLINDEX\r\n$4\r\nlist\r\n$1\r\n0\r\n", + ) + .await; assert_eq!(response, "$1\r\nb\r\n"); - + // Test LPOP let response = send_command(&mut stream, "*2\r\n$4\r\nLPOP\r\n$4\r\nlist\r\n").await; assert_eq!(response, "$1\r\nb\r\n"); - + // Test RPOP let response = send_command(&mut stream, "*2\r\n$4\r\nRPOP\r\n$4\r\nlist\r\n").await; assert_eq!(response, "$1\r\nd\r\n"); // Test LREM - send_command(&mut stream, "*3\r\n$5\r\nLPUSH\r\n$4\r\nlist\r\n$1\r\na\r\n").await; // list is now a, c, a - let response = send_command(&mut stream, "*4\r\n$4\r\nLREM\r\n$4\r\nlist\r\n$1\r\n1\r\n$1\r\na\r\n").await; + send_command( + &mut stream, + "*3\r\n$5\r\nLPUSH\r\n$4\r\nlist\r\n$1\r\na\r\n", + ) + .await; // list is now a, c, a + let response = send_command( + &mut stream, + "*4\r\n$4\r\nLREM\r\n$4\r\nlist\r\n$1\r\n1\r\n$1\r\na\r\n", + ) + .await; assert!(response.contains("1")); // Test LTRIM - let response = send_command(&mut stream, "*4\r\n$5\r\nLTRIM\r\n$4\r\nlist\r\n$1\r\n0\r\n$1\r\n0\r\n").await; + let response = send_command( + &mut stream, + "*4\r\n$5\r\nLTRIM\r\n$4\r\nlist\r\n$1\r\n0\r\n$1\r\n0\r\n", + ) + .await; assert!(response.contains("OK")); let response = send_command(&mut stream, "*2\r\n$4\r\nLLEN\r\n$4\r\nlist\r\n").await; assert!(response.contains("1")); -} \ No newline at end of file +} diff --git a/tests/simple_integration_test.rs b/tests/simple_integration_test.rs index 42269df..d1704e3 100644 --- a/tests/simple_integration_test.rs +++ b/tests/simple_integration_test.rs @@ -1,23 +1,23 @@ -use herodb::{server::Server, options::DBOption}; +use herodb::{options::DBOption, server::Server}; use std::time::Duration; -use tokio::time::sleep; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::TcpStream; +use tokio::time::sleep; // Helper function to start a test server with clean data directory async fn start_test_server(test_name: &str) -> (Server, u16) { use std::sync::atomic::{AtomicU16, Ordering}; static PORT_COUNTER: AtomicU16 = AtomicU16::new(17000); - + // Get a unique port for this test let port = PORT_COUNTER.fetch_add(1, Ordering::SeqCst); - + let test_dir = 
format!("/tmp/herodb_test_{}", test_name); - + // Clean up any existing test data let _ = std::fs::remove_dir_all(&test_dir); std::fs::create_dir_all(&test_dir).unwrap(); - + let option = DBOption { dir: test_dir, port, @@ -26,16 +26,18 @@ async fn start_test_server(test_name: &str) -> (Server, u16) { encryption_key: None, backend: herodb::options::BackendType::Redb, }; - + let server = Server::new(option).await; (server, port) } // Helper function to send Redis command and get response async fn send_redis_command(port: u16, command: &str) -> String { - let mut stream = TcpStream::connect(format!("127.0.0.1:{}", port)).await.unwrap(); + let mut stream = TcpStream::connect(format!("127.0.0.1:{}", port)) + .await + .unwrap(); stream.write_all(command.as_bytes()).await.unwrap(); - + let mut buffer = [0; 1024]; let n = stream.read(&mut buffer).await.unwrap(); String::from_utf8_lossy(&buffer[..n]).to_string() @@ -44,13 +46,13 @@ async fn send_redis_command(port: u16, command: &str) -> String { #[tokio::test] async fn test_basic_redis_functionality() { let (mut server, port) = start_test_server("basic").await; - + // Start server in background with timeout let server_handle = tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + // Accept only a few connections for testing for _ in 0..10 { if let Ok((stream, _)) = listener.accept().await { @@ -58,68 +60,79 @@ async fn test_basic_redis_functionality() { } } }); - + sleep(Duration::from_millis(100)).await; - + // Test PING let response = send_redis_command(port, "*1\r\n$4\r\nPING\r\n").await; assert!(response.contains("PONG")); - + // Test SET - let response = send_redis_command(port, "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n").await; + let response = + send_redis_command(port, "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n").await; assert!(response.contains("OK")); - + // Test GET let response = send_redis_command(port, "*2\r\n$3\r\nGET\r\n$3\r\nkey\r\n").await; assert!(response.contains("value")); - + // Test HSET - let response = send_redis_command(port, "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$5\r\nfield\r\n$5\r\nvalue\r\n").await; + let response = send_redis_command( + port, + "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$5\r\nfield\r\n$5\r\nvalue\r\n", + ) + .await; assert!(response.contains("1")); - + // Test HGET - let response = send_redis_command(port, "*3\r\n$4\r\nHGET\r\n$4\r\nhash\r\n$5\r\nfield\r\n").await; + let response = + send_redis_command(port, "*3\r\n$4\r\nHGET\r\n$4\r\nhash\r\n$5\r\nfield\r\n").await; assert!(response.contains("value")); - + // Test EXISTS let response = send_redis_command(port, "*2\r\n$6\r\nEXISTS\r\n$3\r\nkey\r\n").await; assert!(response.contains("1")); - + // Test TTL let response = send_redis_command(port, "*2\r\n$3\r\nTTL\r\n$3\r\nkey\r\n").await; assert!(response.contains("-1")); // No expiration - + // Test TYPE let response = send_redis_command(port, "*2\r\n$4\r\nTYPE\r\n$3\r\nkey\r\n").await; assert!(response.contains("string")); - + // Test QUIT to close connection gracefully - let mut stream = TcpStream::connect(format!("127.0.0.1:{}", port)).await.unwrap(); - stream.write_all("*1\r\n$4\r\nQUIT\r\n".as_bytes()).await.unwrap(); + let mut stream = TcpStream::connect(format!("127.0.0.1:{}", port)) + .await + .unwrap(); + stream + .write_all("*1\r\n$4\r\nQUIT\r\n".as_bytes()) + .await + .unwrap(); let mut buffer = [0; 1024]; let n = stream.read(&mut buffer).await.unwrap(); let response = String::from_utf8_lossy(&buffer[..n]); 
assert!(response.contains("OK")); - + // Ensure the stream is closed stream.shutdown().await.unwrap(); // Stop the server server_handle.abort(); - + println!("✅ All basic Redis functionality tests passed!"); } #[tokio::test] async fn test_hash_operations() { let (mut server, port) = start_test_server("hash_ops").await; - + // Start server in background with timeout let server_handle = tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + // Accept only a few connections for testing for _ in 0..5 { if let Ok((stream, _)) = listener.accept().await { @@ -127,53 +140,57 @@ async fn test_hash_operations() { } } }); - + sleep(Duration::from_millis(100)).await; - + // Test HSET multiple fields let response = send_redis_command(port, "*6\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n$6\r\nvalue1\r\n$6\r\nfield2\r\n$6\r\nvalue2\r\n").await; assert!(response.contains("2")); // 2 new fields - + // Test HGETALL let response = send_redis_command(port, "*2\r\n$7\r\nHGETALL\r\n$4\r\nhash\r\n").await; assert!(response.contains("field1")); assert!(response.contains("value1")); assert!(response.contains("field2")); assert!(response.contains("value2")); - + // Test HEXISTS - let response = send_redis_command(port, "*3\r\n$7\r\nHEXISTS\r\n$4\r\nhash\r\n$6\r\nfield1\r\n").await; + let response = send_redis_command( + port, + "*3\r\n$7\r\nHEXISTS\r\n$4\r\nhash\r\n$6\r\nfield1\r\n", + ) + .await; assert!(response.contains("1")); - + // Test HLEN let response = send_redis_command(port, "*2\r\n$4\r\nHLEN\r\n$4\r\nhash\r\n").await; assert!(response.contains("2")); - + // Test HSCAN let response = send_redis_command(port, "*7\r\n$5\r\nHSCAN\r\n$4\r\nhash\r\n$1\r\n0\r\n$5\r\nMATCH\r\n$1\r\n*\r\n$5\r\nCOUNT\r\n$2\r\n10\r\n").await; assert!(response.contains("field1")); assert!(response.contains("value1")); assert!(response.contains("field2")); assert!(response.contains("value2")); - + // Stop the server // For hash operations, we don't have a persistent stream, so we'll just abort the server. // The server should handle closing its connections. 
server_handle.abort(); - + println!("✅ All hash operations tests passed!"); } #[tokio::test] async fn test_transaction_operations() { let (mut server, port) = start_test_server("transactions").await; - + // Start server in background with timeout let server_handle = tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + // Accept only a few connections for testing for _ in 0..5 { if let Ok((stream, _)) = listener.accept().await { @@ -181,49 +198,69 @@ async fn test_transaction_operations() { } } }); - + sleep(Duration::from_millis(100)).await; - + // Use a single connection for the transaction - let mut stream = TcpStream::connect(format!("127.0.0.1:{}", port)).await.unwrap(); - + let mut stream = TcpStream::connect(format!("127.0.0.1:{}", port)) + .await + .unwrap(); + // Test MULTI - stream.write_all("*1\r\n$5\r\nMULTI\r\n".as_bytes()).await.unwrap(); + stream + .write_all("*1\r\n$5\r\nMULTI\r\n".as_bytes()) + .await + .unwrap(); let mut buffer = [0; 1024]; let n = stream.read(&mut buffer).await.unwrap(); let response = String::from_utf8_lossy(&buffer[..n]); assert!(response.contains("OK")); - + // Test queued commands - stream.write_all("*3\r\n$3\r\nSET\r\n$4\r\nkey1\r\n$6\r\nvalue1\r\n".as_bytes()).await.unwrap(); + stream + .write_all("*3\r\n$3\r\nSET\r\n$4\r\nkey1\r\n$6\r\nvalue1\r\n".as_bytes()) + .await + .unwrap(); let n = stream.read(&mut buffer).await.unwrap(); let response = String::from_utf8_lossy(&buffer[..n]); assert!(response.contains("QUEUED")); - - stream.write_all("*3\r\n$3\r\nSET\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n".as_bytes()).await.unwrap(); + + stream + .write_all("*3\r\n$3\r\nSET\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n".as_bytes()) + .await + .unwrap(); let n = stream.read(&mut buffer).await.unwrap(); let response = String::from_utf8_lossy(&buffer[..n]); assert!(response.contains("QUEUED")); - + // Test EXEC - stream.write_all("*1\r\n$4\r\nEXEC\r\n".as_bytes()).await.unwrap(); + stream + .write_all("*1\r\n$4\r\nEXEC\r\n".as_bytes()) + .await + .unwrap(); let n = stream.read(&mut buffer).await.unwrap(); let response = String::from_utf8_lossy(&buffer[..n]); assert!(response.contains("OK")); // Should contain array of OK responses - + // Verify commands were executed - stream.write_all("*2\r\n$3\r\nGET\r\n$4\r\nkey1\r\n".as_bytes()).await.unwrap(); + stream + .write_all("*2\r\n$3\r\nGET\r\n$4\r\nkey1\r\n".as_bytes()) + .await + .unwrap(); let n = stream.read(&mut buffer).await.unwrap(); let response = String::from_utf8_lossy(&buffer[..n]); assert!(response.contains("value1")); - - stream.write_all("*2\r\n$3\r\nGET\r\n$4\r\nkey2\r\n".as_bytes()).await.unwrap(); + + stream + .write_all("*2\r\n$3\r\nGET\r\n$4\r\nkey2\r\n".as_bytes()) + .await + .unwrap(); let n = stream.read(&mut buffer).await.unwrap(); let response = String::from_utf8_lossy(&buffer[..n]); assert!(response.contains("value2")); // Stop the server server_handle.abort(); - + println!("✅ All transaction operations tests passed!"); -} \ No newline at end of file +} diff --git a/tests/simple_redis_test.rs b/tests/simple_redis_test.rs index 8afb304..bf9aee7 100644 --- a/tests/simple_redis_test.rs +++ b/tests/simple_redis_test.rs @@ -1,4 +1,4 @@ -use herodb::{server::Server, options::DBOption}; +use herodb::{options::DBOption, server::Server}; use std::time::Duration; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::TcpStream; @@ -8,14 +8,14 @@ use tokio::time::sleep; async fn start_test_server(test_name: &str) -> (Server, u16) { use 
std::sync::atomic::{AtomicU16, Ordering}; static PORT_COUNTER: AtomicU16 = AtomicU16::new(16500); - + let port = PORT_COUNTER.fetch_add(1, Ordering::SeqCst); let test_dir = format!("/tmp/herodb_simple_test_{}", test_name); - + // Clean up any existing test data let _ = std::fs::remove_dir_all(&test_dir); std::fs::create_dir_all(&test_dir).unwrap(); - + let option = DBOption { dir: test_dir, port, @@ -24,7 +24,7 @@ async fn start_test_server(test_name: &str) -> (Server, u16) { encryption_key: None, backend: herodb::options::BackendType::Redb, }; - + let server = Server::new(option).await; (server, port) } @@ -32,7 +32,7 @@ async fn start_test_server(test_name: &str) -> (Server, u16) { // Helper function to send command and get response async fn send_command(stream: &mut TcpStream, command: &str) -> String { stream.write_all(command.as_bytes()).await.unwrap(); - + let mut buffer = [0; 1024]; let n = stream.read(&mut buffer).await.unwrap(); String::from_utf8_lossy(&buffer[..n]).to_string() @@ -56,22 +56,22 @@ async fn connect_to_server(port: u16) -> TcpStream { #[tokio::test] async fn test_basic_ping_simple() { let (mut server, port) = start_test_server("ping").await; - + // Start server in background tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let _ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(200)).await; - + let mut stream = connect_to_server(port).await; let response = send_command(&mut stream, "*1\r\n$4\r\nPING\r\n").await; assert!(response.contains("PONG")); @@ -80,31 +80,43 @@ async fn test_basic_ping_simple() { #[tokio::test] async fn test_hset_clean_db() { let (mut server, port) = start_test_server("hset_clean").await; - + // Start server in background tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let _ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(200)).await; - + let mut stream = connect_to_server(port).await; - + // Test HSET - should return 1 for new field - let response = send_command(&mut stream, "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n$6\r\nvalue1\r\n").await; + let response = send_command( + &mut stream, + "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n$6\r\nvalue1\r\n", + ) + .await; println!("HSET response: {}", response); - assert!(response.contains("1"), "Expected HSET to return 1, got: {}", response); - + assert!( + response.contains("1"), + "Expected HSET to return 1, got: {}", + response + ); + // Test HGET - let response = send_command(&mut stream, "*3\r\n$4\r\nHGET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n").await; + let response = send_command( + &mut stream, + "*3\r\n$4\r\nHGET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n", + ) + .await; println!("HGET response: {}", response); assert!(response.contains("value1")); } @@ -112,73 +124,101 @@ async fn test_hset_clean_db() { #[tokio::test] async fn test_type_command_simple() { let (mut server, port) = start_test_server("type").await; - + // Start server in background tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let _ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(200)).await; - + let mut stream = connect_to_server(port).await; - + // Test string 
type - send_command(&mut stream, "*3\r\n$3\r\nSET\r\n$6\r\nstring\r\n$5\r\nvalue\r\n").await; + send_command( + &mut stream, + "*3\r\n$3\r\nSET\r\n$6\r\nstring\r\n$5\r\nvalue\r\n", + ) + .await; let response = send_command(&mut stream, "*2\r\n$4\r\nTYPE\r\n$6\r\nstring\r\n").await; println!("TYPE string response: {}", response); assert!(response.contains("string")); - + // Test hash type - send_command(&mut stream, "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$5\r\nfield\r\n$5\r\nvalue\r\n").await; + send_command( + &mut stream, + "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$5\r\nfield\r\n$5\r\nvalue\r\n", + ) + .await; let response = send_command(&mut stream, "*2\r\n$4\r\nTYPE\r\n$4\r\nhash\r\n").await; println!("TYPE hash response: {}", response); assert!(response.contains("hash")); - + // Test non-existent key let response = send_command(&mut stream, "*2\r\n$4\r\nTYPE\r\n$7\r\nnoexist\r\n").await; println!("TYPE noexist response: {}", response); - assert!(response.contains("none"), "Expected 'none' for non-existent key, got: {}", response); + assert!( + response.contains("none"), + "Expected 'none' for non-existent key, got: {}", + response + ); } #[tokio::test] async fn test_hexists_simple() { let (mut server, port) = start_test_server("hexists").await; - + // Start server in background tokio::spawn(async move { let listener = tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)) .await .unwrap(); - + loop { if let Ok((stream, _)) = listener.accept().await { let _ = server.handle(stream).await; } } }); - + sleep(Duration::from_millis(200)).await; - + let mut stream = connect_to_server(port).await; - + // Set up hash - send_command(&mut stream, "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n$6\r\nvalue1\r\n").await; - + send_command( + &mut stream, + "*4\r\n$4\r\nHSET\r\n$4\r\nhash\r\n$6\r\nfield1\r\n$6\r\nvalue1\r\n", + ) + .await; + // Test HEXISTS for existing field - let response = send_command(&mut stream, "*3\r\n$7\r\nHEXISTS\r\n$4\r\nhash\r\n$6\r\nfield1\r\n").await; + let response = send_command( + &mut stream, + "*3\r\n$7\r\nHEXISTS\r\n$4\r\nhash\r\n$6\r\nfield1\r\n", + ) + .await; println!("HEXISTS existing field response: {}", response); assert!(response.contains("1")); - + // Test HEXISTS for non-existent field - let response = send_command(&mut stream, "*3\r\n$7\r\nHEXISTS\r\n$4\r\nhash\r\n$7\r\nnoexist\r\n").await; + let response = send_command( + &mut stream, + "*3\r\n$7\r\nHEXISTS\r\n$4\r\nhash\r\n$7\r\nnoexist\r\n", + ) + .await; println!("HEXISTS non-existent field response: {}", response); - assert!(response.contains("0"), "Expected HEXISTS to return 0 for non-existent field, got: {}", response); -} \ No newline at end of file + assert!( + response.contains("0"), + "Expected HEXISTS to return 0 for non-existent field, got: {}", + response + ); +} diff --git a/tests/usage_suite.rs b/tests/usage_suite.rs index d7298cc..6874edb 100644 --- a/tests/usage_suite.rs +++ b/tests/usage_suite.rs @@ -325,7 +325,11 @@ async fn test_03_scan_and_keys() { let mut s = connect(port).await; for i in 0..5 { - let _ = send_cmd(&mut s, &["SET", &format!("key{}", i), &format!("value{}", i)]).await; + let _ = send_cmd( + &mut s, + &["SET", &format!("key{}", i), &format!("value{}", i)], + ) + .await; } let scan = send_cmd(&mut s, &["SCAN", "0", "MATCH", "key*", "COUNT", "10"]).await; @@ -358,7 +362,11 @@ async fn test_04_hashes_suite() { assert_contains(&h2, "2", "HSET added 2 new fields"); // HMGET - let hmg = send_cmd(&mut s, &["HMGET", "profile:1", "name", "age", "city", "nope"]).await; + let hmg 
= send_cmd( + &mut s, + &["HMGET", "profile:1", "name", "age", "city", "nope"], + ) + .await; assert_contains(&hmg, "alice", "HMGET name"); assert_contains(&hmg, "30", "HMGET age"); assert_contains(&hmg, "paris", "HMGET city"); @@ -392,7 +400,11 @@ async fn test_04_hashes_suite() { assert_contains(&hnx1, "1", "HSETNX new field -> 1"); // HSCAN - let hscan = send_cmd(&mut s, &["HSCAN", "profile:1", "0", "MATCH", "n*", "COUNT", "10"]).await; + let hscan = send_cmd( + &mut s, + &["HSCAN", "profile:1", "0", "MATCH", "n*", "COUNT", "10"], + ) + .await; assert_contains(&hscan, "name", "HSCAN matches fields starting with n"); assert_contains(&hscan, "nickname", "HSCAN nickname present"); @@ -424,13 +436,21 @@ async fn test_05_lists_suite_including_blpop() { assert_eq_resp(&lidx, "$1\r\nb\r\n", "LINDEX q:jobs 0 should be b"); let lr = send_cmd(&mut a, &["LRANGE", "q:jobs", "0", "-1"]).await; - assert_eq_resp(&lr, "*3\r\n$1\r\nb\r\n$1\r\na\r\n$1\r\nc\r\n", "LRANGE q:jobs 0 -1 should be [b,a,c]"); + assert_eq_resp( + &lr, + "*3\r\n$1\r\nb\r\n$1\r\na\r\n$1\r\nc\r\n", + "LRANGE q:jobs 0 -1 should be [b,a,c]", + ); // LTRIM let ltrim = send_cmd(&mut a, &["LTRIM", "q:jobs", "0", "1"]).await; assert_contains(&ltrim, "OK", "LTRIM OK"); let lr_post = send_cmd(&mut a, &["LRANGE", "q:jobs", "0", "-1"]).await; - assert_eq_resp(&lr_post, "*2\r\n$1\r\nb\r\n$1\r\na\r\n", "After LTRIM, list [b,a]"); + assert_eq_resp( + &lr_post, + "*2\r\n$1\r\nb\r\n$1\r\na\r\n", + "After LTRIM, list [b,a]", + ); // LREM remove first occurrence of b let lrem = send_cmd(&mut a, &["LREM", "q:jobs", "1", "b"]).await; @@ -444,7 +464,11 @@ async fn test_05_lists_suite_including_blpop() { // LPOP with count on empty -> [] let lpop0 = send_cmd(&mut a, &["LPOP", "q:jobs", "2"]).await; - assert_eq_resp(&lpop0, "*0\r\n", "LPOP with count on empty returns empty array"); + assert_eq_resp( + &lpop0, + "*0\r\n", + "LPOP with count on empty returns empty array", + ); // BLPOP: block on one client, push from another let c1 = connect(port).await; @@ -513,7 +537,7 @@ async fn test_07_age_stateless_suite() { // naive parse for tests let mut lines = resp.lines(); let _ = lines.next(); // *2 - // $len + // $len let _ = lines.next(); let recip = lines.next().unwrap_or("").to_string(); let _ = lines.next(); @@ -548,8 +572,16 @@ async fn test_07_age_stateless_suite() { let v_ok = send_cmd(&mut s, &["AGE", "VERIFY", &verify_pub, "msg", &sig_b64]).await; assert_contains(&v_ok, "1", "VERIFY should be 1 for valid signature"); - let v_bad = send_cmd(&mut s, &["AGE", "VERIFY", &verify_pub, "tampered", &sig_b64]).await; - assert_contains(&v_bad, "0", "VERIFY should be 0 for invalid message/signature"); + let v_bad = send_cmd( + &mut s, + &["AGE", "VERIFY", &verify_pub, "tampered", &sig_b64], + ) + .await; + assert_contains( + &v_bad, + "0", + "VERIFY should be 0 for invalid message/signature", + ); } #[tokio::test] @@ -581,7 +613,7 @@ async fn test_08_age_persistent_named_suite() { skg ); - let sig = send_cmd(&mut s, &["AGE", "SIGNNAME", "app1", "m"] ).await; + let sig = send_cmd(&mut s, &["AGE", "SIGNNAME", "app1", "m"]).await; let sig_b64 = extract_bulk_payload(&sig).expect("Failed to parse bulk payload from SIGNNAME"); let v1 = send_cmd(&mut s, &["AGE", "VERIFYNAME", "app1", "m", &sig_b64]).await; assert_contains(&v1, "1", "VERIFYNAME valid => 1"); @@ -597,60 +629,69 @@ #[tokio::test] async fn test_10_expire_pexpire_persist() { - let (server, port) = start_test_server("expire_suite").await; -
spawn_listener(server, port).await; - sleep(Duration::from_millis(150)).await; + let (server, port) = start_test_server("expire_suite").await; + spawn_listener(server, port).await; + sleep(Duration::from_millis(150)).await; - let mut s = connect(port).await; + let mut s = connect(port).await; - // EXPIRE: seconds - let _ = send_cmd(&mut s, &["SET", "exp:s", "v"]).await; - let ex = send_cmd(&mut s, &["EXPIRE", "exp:s", "1"]).await; - assert_contains(&ex, "1", "EXPIRE exp:s 1 -> 1 (applied)"); - let ttl1 = send_cmd(&mut s, &["TTL", "exp:s"]).await; - assert!( - ttl1.contains("1") || ttl1.contains("0"), - "TTL exp:s should be 1 or 0, got: {}", - ttl1 - ); - sleep(Duration::from_millis(1100)).await; - let get_after = send_cmd(&mut s, &["GET", "exp:s"]).await; - assert_contains(&get_after, "$-1", "GET after expiry should be Null"); - let ttl_after = send_cmd(&mut s, &["TTL", "exp:s"]).await; - assert_contains(&ttl_after, "-2", "TTL after expiry -> -2"); - let exists_after = send_cmd(&mut s, &["EXISTS", "exp:s"]).await; - assert_contains(&exists_after, "0", "EXISTS after expiry -> 0"); + // EXPIRE: seconds + let _ = send_cmd(&mut s, &["SET", "exp:s", "v"]).await; + let ex = send_cmd(&mut s, &["EXPIRE", "exp:s", "1"]).await; + assert_contains(&ex, "1", "EXPIRE exp:s 1 -> 1 (applied)"); + let ttl1 = send_cmd(&mut s, &["TTL", "exp:s"]).await; + assert!( + ttl1.contains("1") || ttl1.contains("0"), + "TTL exp:s should be 1 or 0, got: {}", + ttl1 + ); + sleep(Duration::from_millis(1100)).await; + let get_after = send_cmd(&mut s, &["GET", "exp:s"]).await; + assert_contains(&get_after, "$-1", "GET after expiry should be Null"); + let ttl_after = send_cmd(&mut s, &["TTL", "exp:s"]).await; + assert_contains(&ttl_after, "-2", "TTL after expiry -> -2"); + let exists_after = send_cmd(&mut s, &["EXISTS", "exp:s"]).await; + assert_contains(&exists_after, "0", "EXISTS after expiry -> 0"); - // PEXPIRE: milliseconds - let _ = send_cmd(&mut s, &["SET", "exp:ms", "v"]).await; - let pex = send_cmd(&mut s, &["PEXPIRE", "exp:ms", "1500"]).await; - assert_contains(&pex, "1", "PEXPIRE exp:ms 1500 -> 1 (applied)"); - let ttl_ms1 = send_cmd(&mut s, &["TTL", "exp:ms"]).await; - assert!( - ttl_ms1.contains("1") || ttl_ms1.contains("0"), - "TTL exp:ms should be 1 or 0 soon after PEXPIRE, got: {}", - ttl_ms1 - ); - sleep(Duration::from_millis(1600)).await; - let exists_ms_after = send_cmd(&mut s, &["EXISTS", "exp:ms"]).await; - assert_contains(&exists_ms_after, "0", "EXISTS exp:ms after ms expiry -> 0"); + // PEXPIRE: milliseconds + let _ = send_cmd(&mut s, &["SET", "exp:ms", "v"]).await; + let pex = send_cmd(&mut s, &["PEXPIRE", "exp:ms", "1500"]).await; + assert_contains(&pex, "1", "PEXPIRE exp:ms 1500 -> 1 (applied)"); + let ttl_ms1 = send_cmd(&mut s, &["TTL", "exp:ms"]).await; + assert!( + ttl_ms1.contains("1") || ttl_ms1.contains("0"), + "TTL exp:ms should be 1 or 0 soon after PEXPIRE, got: {}", + ttl_ms1 + ); + sleep(Duration::from_millis(1600)).await; + let exists_ms_after = send_cmd(&mut s, &["EXISTS", "exp:ms"]).await; + assert_contains(&exists_ms_after, "0", "EXISTS exp:ms after ms expiry -> 0"); - // PERSIST: remove expiration - let _ = send_cmd(&mut s, &["SET", "exp:persist", "v"]).await; - let _ = send_cmd(&mut s, &["EXPIRE", "exp:persist", "5"]).await; - let ttl_pre = send_cmd(&mut s, &["TTL", "exp:persist"]).await; - assert!( - ttl_pre.contains("5") || ttl_pre.contains("4") || ttl_pre.contains("3") || ttl_pre.contains("2") || ttl_pre.contains("1") || ttl_pre.contains("0"), - "TTL exp:persist should be >=0 
before persist, got: {}", - ttl_pre - ); - let persist1 = send_cmd(&mut s, &["PERSIST", "exp:persist"]).await; - assert_contains(&persist1, "1", "PERSIST should remove expiration"); - let ttl_post = send_cmd(&mut s, &["TTL", "exp:persist"]).await; - assert_contains(&ttl_post, "-1", "TTL after PERSIST -> -1 (no expiration)"); - // Second persist should return 0 (nothing to remove) - let persist2 = send_cmd(&mut s, &["PERSIST", "exp:persist"]).await; - assert_contains(&persist2, "0", "PERSIST again -> 0 (no expiration to remove)"); + // PERSIST: remove expiration + let _ = send_cmd(&mut s, &["SET", "exp:persist", "v"]).await; + let _ = send_cmd(&mut s, &["EXPIRE", "exp:persist", "5"]).await; + let ttl_pre = send_cmd(&mut s, &["TTL", "exp:persist"]).await; + assert!( + ttl_pre.contains("5") + || ttl_pre.contains("4") + || ttl_pre.contains("3") + || ttl_pre.contains("2") + || ttl_pre.contains("1") + || ttl_pre.contains("0"), + "TTL exp:persist should be >=0 before persist, got: {}", + ttl_pre + ); + let persist1 = send_cmd(&mut s, &["PERSIST", "exp:persist"]).await; + assert_contains(&persist1, "1", "PERSIST should remove expiration"); + let ttl_post = send_cmd(&mut s, &["TTL", "exp:persist"]).await; + assert_contains(&ttl_post, "-1", "TTL after PERSIST -> -1 (no expiration)"); + // Second persist should return 0 (nothing to remove) + let persist2 = send_cmd(&mut s, &["PERSIST", "exp:persist"]).await; + assert_contains( + &persist2, + "0", + "PERSIST again -> 0 (no expiration to remove)", + ); } #[tokio::test] @@ -663,7 +704,11 @@ async fn test_11_set_with_options() { // SET with GET on non-existing key -> returns Null, sets value let set_get1 = send_cmd(&mut s, &["SET", "s1", "v1", "GET"]).await; - assert_contains(&set_get1, "$-1", "SET s1 v1 GET returns Null when key didn't exist"); + assert_contains( + &set_get1, + "$-1", + "SET s1 v1 GET returns Null when key didn't exist", + ); let g1 = send_cmd(&mut s, &["GET", "s1"]).await; assert_contains(&g1, "v1", "GET s1 after first SET"); @@ -707,42 +752,42 @@ async fn test_11_set_with_options() { #[tokio::test] async fn test_09_mget_mset_and_variadic_exists_del() { - let (server, port) = start_test_server("mget_mset_variadic").await; - spawn_listener(server, port).await; - sleep(Duration::from_millis(150)).await; + let (server, port) = start_test_server("mget_mset_variadic").await; + spawn_listener(server, port).await; + sleep(Duration::from_millis(150)).await; - let mut s = connect(port).await; + let mut s = connect(port).await; - // MSET multiple keys - let mset = send_cmd(&mut s, &["MSET", "k1", "v1", "k2", "v2", "k3", "v3"]).await; - assert_contains(&mset, "OK", "MSET k1 v1 k2 v2 k3 v3 -> OK"); + // MSET multiple keys + let mset = send_cmd(&mut s, &["MSET", "k1", "v1", "k2", "v2", "k3", "v3"]).await; + assert_contains(&mset, "OK", "MSET k1 v1 k2 v2 k3 v3 -> OK"); - // MGET should return values and Null for missing - let mget = send_cmd(&mut s, &["MGET", "k1", "k2", "nope", "k3"]).await; - // Expect an array with 4 entries; verify payloads - assert_contains(&mget, "v1", "MGET k1"); - assert_contains(&mget, "v2", "MGET k2"); - assert_contains(&mget, "v3", "MGET k3"); - assert_contains(&mget, "$-1", "MGET missing returns Null"); + // MGET should return values and Null for missing + let mget = send_cmd(&mut s, &["MGET", "k1", "k2", "nope", "k3"]).await; + // Expect an array with 4 entries; verify payloads + assert_contains(&mget, "v1", "MGET k1"); + assert_contains(&mget, "v2", "MGET k2"); + assert_contains(&mget, "v3", "MGET k3"); + 
assert_contains(&mget, "$-1", "MGET missing returns Null"); - // EXISTS variadic: count how many exist - let exists_multi = send_cmd(&mut s, &["EXISTS", "k1", "nope", "k3"]).await; - // Server returns SimpleString numeric, e.g. +2 - assert_contains(&exists_multi, "2", "EXISTS k1 nope k3 -> 2"); + // EXISTS variadic: count how many exist + let exists_multi = send_cmd(&mut s, &["EXISTS", "k1", "nope", "k3"]).await; + // Server returns SimpleString numeric, e.g. +2 + assert_contains(&exists_multi, "2", "EXISTS k1 nope k3 -> 2"); - // DEL variadic: delete multiple keys, return count deleted - let del_multi = send_cmd(&mut s, &["DEL", "k1", "k3", "nope"]).await; - assert_contains(&del_multi, "2", "DEL k1 k3 nope -> 2"); + // DEL variadic: delete multiple keys, return count deleted + let del_multi = send_cmd(&mut s, &["DEL", "k1", "k3", "nope"]).await; + assert_contains(&del_multi, "2", "DEL k1 k3 nope -> 2"); - // Verify deletion - let exists_after = send_cmd(&mut s, &["EXISTS", "k1", "k3"]).await; - assert_contains(&exists_after, "0", "EXISTS k1 k3 after DEL -> 0"); + // Verify deletion + let exists_after = send_cmd(&mut s, &["EXISTS", "k1", "k3"]).await; + assert_contains(&exists_after, "0", "EXISTS k1 k3 after DEL -> 0"); - // MGET after deletion should include Nulls for deleted keys - let mget_after = send_cmd(&mut s, &["MGET", "k1", "k2", "k3"]).await; - assert_contains(&mget_after, "$-1", "MGET k1 after DEL -> Null"); - assert_contains(&mget_after, "v2", "MGET k2 remains"); - assert_contains(&mget_after, "$-1", "MGET k3 after DEL -> Null"); + // MGET after deletion should include Nulls for deleted keys + let mget_after = send_cmd(&mut s, &["MGET", "k1", "k2", "k3"]).await; + assert_contains(&mget_after, "$-1", "MGET k1 after DEL -> Null"); + assert_contains(&mget_after, "v2", "MGET k2 remains"); + assert_contains(&mget_after, "$-1", "MGET k3 after DEL -> Null"); } #[tokio::test] async fn test_12_hash_incr() { @@ -862,9 +907,16 @@ async fn test_14_expireat_pexpireat() { let mut s = connect(port).await; // EXPIREAT: seconds since epoch - let now_secs = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() as i64; + let now_secs = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs() as i64; let _ = send_cmd(&mut s, &["SET", "exp:at:s", "v"]).await; - let exat = send_cmd(&mut s, &["EXPIREAT", "exp:at:s", &format!("{}", now_secs + 1)]).await; + let exat = send_cmd( + &mut s, + &["EXPIREAT", "exp:at:s", &format!("{}", now_secs + 1)], + ) + .await; assert_contains(&exat, "1", "EXPIREAT exp:at:s now+1s -> 1 (applied)"); let ttl1 = send_cmd(&mut s, &["TTL", "exp:at:s"]).await; assert!( @@ -874,12 +926,23 @@ async fn test_14_expireat_pexpireat() { ); sleep(Duration::from_millis(1200)).await; let exists_after_exat = send_cmd(&mut s, &["EXISTS", "exp:at:s"]).await; - assert_contains(&exists_after_exat, "0", "EXISTS exp:at:s after EXPIREAT expiry -> 0"); + assert_contains( + &exists_after_exat, + "0", + "EXISTS exp:at:s after EXPIREAT expiry -> 0", + ); // PEXPIREAT: milliseconds since epoch - let now_ms = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as i64; + let now_ms = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_millis() as i64; let _ = send_cmd(&mut s, &["SET", "exp:at:ms", "v"]).await; - let pexat = send_cmd(&mut s, &["PEXPIREAT", "exp:at:ms", &format!("{}", now_ms + 450)]).await; + let pexat = send_cmd( + &mut s, + &["PEXPIREAT", "exp:at:ms", &format!("{}", now_ms + 450)], + ) + .await; assert_contains(&pexat, "1", 
"PEXPIREAT exp:at:ms now+450ms -> 1 (applied)"); let ttl2 = send_cmd(&mut s, &["TTL", "exp:at:ms"]).await; assert!( @@ -889,5 +952,9 @@ async fn test_14_expireat_pexpireat() { ); sleep(Duration::from_millis(600)).await; let exists_after_pexat = send_cmd(&mut s, &["EXISTS", "exp:at:ms"]).await; - assert_contains(&exists_after_pexat, "0", "EXISTS exp:at:ms after PEXPIREAT expiry -> 0"); -} \ No newline at end of file + assert_contains( + &exists_after_pexat, + "0", + "EXISTS exp:at:ms after PEXPIREAT expiry -> 0", + ); +}