diff --git a/Cargo.lock b/Cargo.lock index 13ac985b..6f42c4c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -94,6 +94,12 @@ version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" +[[package]] +name = "arc-swap" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" + [[package]] name = "arraydeque" version = "0.5.1" @@ -157,6 +163,74 @@ version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + +[[package]] +name = "bindgen" +version = "0.69.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" +dependencies = [ + "bitflags 2.9.0", + "cexpr", + "clang-sys", + "itertools 0.10.5", + "lazy_static", + "lazycell", + "proc-macro2", + "quote", + "regex", + "rustc-hash 1.1.0", + "shlex", + "syn 2.0.108", +] + +[[package]] +name = "bindgen" +version = "0.71.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" +dependencies = [ + "bitflags 2.9.0", + "cexpr", + "clang-sys", + "itertools 0.13.0", + "proc-macro2", + "quote", + "regex", + "rustc-hash 2.1.1", + "shlex", + "syn 2.0.108", +] + +[[package]] +name = "bit-set" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version 
= "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + [[package]] name = "bitflags" version = "2.9.0" @@ -217,6 +291,16 @@ version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" +[[package]] +name = "bzip2-sys" +version = "0.1.13+1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" +dependencies = [ + "cc", + "pkg-config", +] + [[package]] name = "cassowary" version = "0.3.0" @@ -249,6 +333,15 @@ dependencies = [ "shlex", ] +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + [[package]] name = "cfg-if" version = "1.0.4" @@ -306,6 +399,17 @@ dependencies = [ "half", ] +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + [[package]] name = "clap" version = "4.5.51" @@ -449,6 +553,34 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot 0.5.0", + "futures", + "is-terminal", + "itertools 0.10.5", + "num-traits", + "once_cell", + "oorandom", + 
"plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "tokio", + "walkdir", +] + [[package]] name = "criterion" version = "0.7.0" @@ -459,8 +591,8 @@ dependencies = [ "cast", "ciborium", "clap", - "criterion-plot", - "itertools", + "criterion-plot 0.6.0", + "itertools 0.13.0", "num-traits", "oorandom", "plotters", @@ -472,6 +604,16 @@ dependencies = [ "walkdir", ] +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools 0.10.5", +] + [[package]] name = "criterion-plot" version = "0.6.0" @@ -479,7 +621,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b1bcc0dc7dfae599d84ad0b1a55f80cde8af3725da8313b528da95ef783e338" dependencies = [ "cast", - "itertools", + "itertools 0.13.0", ] [[package]] @@ -543,7 +685,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ - "bitflags", + "bitflags 2.9.0", "crossterm_winapi", "futures-core", "mio", @@ -722,12 +864,44 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "env_filter" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bf3c259d255ca70051b30e2e95b5446cdb8949ac4cd22c0d7fd634d89f568e2" +dependencies = [ + "log", + "regex", +] + +[[package]] +name = "env_logger" +version = "0.11.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c863f0904021b108aa8b2f55046443e6b1ebde8fd4a15c399893aae4fa069f" +dependencies = [ + "anstream", + "anstyle", + "env_filter", + "jiff", + "log", +] + [[package]] name = "equivalent" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" +[[package]] +name = "erased-serde" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c" +dependencies = [ + "serde", +] + [[package]] name = "erased-serde" version = "0.4.6" @@ -921,6 +1095,15 @@ dependencies = [ "slab", ] +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -956,6 +1139,24 @@ dependencies = [ "wasi 0.14.2+wasi-0.2.4", ] +[[package]] +name = "getset" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf0fc11e47561d47397154977bc219f4cf809b2974facc3ccb3b89e2436f912" +dependencies = [ + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.108", +] + +[[package]] +name = "glob" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" + [[package]] name = "h2" version = "0.4.9" @@ -1378,12 +1579,32 @@ dependencies = [ "serde", ] +[[package]] +name = "is-terminal" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys 0.61.2", +] + [[package]] name = "is_terminal_polyfill" version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.13.0" @@ -1399,6 +1620,30 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +[[package]] +name = "jiff" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be1f93b8b1eb69c77f24bbb0afdf66f54b632ee39af40ca21c4365a1d7347e49" +dependencies = [ + "jiff-static", + "log", + "portable-atomic", + "portable-atomic-util", + "serde", +] + +[[package]] +name = "jiff-static" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.108", +] + [[package]] name = "jobserver" version = "0.1.33" @@ -1436,20 +1681,52 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + [[package]] name = "libc" version = "0.2.177" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" +[[package]] +name = "libloading" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link 0.2.1", +] + [[package]] name = "libredox" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - 
"bitflags", + "bitflags 2.9.0", + "libc", +] + +[[package]] +name = "librocksdb-sys" +version = "0.16.0+8.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce3d60bc059831dc1c83903fb45c103f75db65c5a7bf22272764d9cc683e348c" +dependencies = [ + "bindgen 0.69.5", + "bzip2-sys", + "cc", + "glob", "libc", + "libz-sys", + "lz4-sys", + "zstd-sys", ] [[package]] @@ -1518,6 +1795,16 @@ dependencies = [ "hashbrown 0.15.3", ] +[[package]] +name = "lz4-sys" +version = "1.11.1+lz4-1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "lz4_flex" version = "0.11.5" @@ -1545,6 +1832,12 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "miniz_oxide" version = "0.8.8" @@ -1644,6 +1937,16 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + [[package]] name = "ntapi" version = "0.4.1" @@ -1693,7 +1996,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c10c2894a6fed806ade6027bcd50662746363a9589d3ec9d9bef30a4e4bc166" dependencies = [ - "bitflags", + "bitflags 2.9.0", ] [[package]] @@ -1724,7 +2027,7 @@ version = "0.10.72" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fedfea7d58a1f73118430a55da6a286e7b044961736ce96a16a17068ea25e5da" dependencies = [ - 
"bitflags", + "bitflags 2.9.0", "cfg-if", "foreign-types", "libc", @@ -1931,6 +2234,21 @@ dependencies = [ "plotters-backend", ] +[[package]] +name = "portable-atomic" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + +[[package]] +name = "portable-atomic-util" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +dependencies = [ + "portable-atomic", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -2003,6 +2321,93 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "proptest" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags 2.9.0", + "num-traits", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rand_xorshift", + "regex-syntax", + "rusty-fork", + "tempfile", + "unarray", +] + +[[package]] +name = "prost" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +dependencies = [ + "anyhow", + "itertools 0.13.0", + "proc-macro2", + "quote", + "syn 2.0.108", +] + +[[package]] +name = "prost-types" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" +dependencies = [ + "prost", +] + +[[package]] +name = "protobuf" +version = "2.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" +dependencies = [ + "bytes", +] + +[[package]] +name = "protobuf-build" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2df9942df2981178a930a72d442de47e2f0df18ad68e50a30f816f1848215ad0" +dependencies = [ + "bitflags 1.3.2", + "protobuf", + "protobuf-codegen", + "regex", +] + +[[package]] +name = "protobuf-codegen" +version = "2.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "033460afb75cf755fcfc16dfaed20b86468082a2ea24e05ac35ab4a099a017d6" +dependencies = [ + "protobuf", +] + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quote" version = "1.0.41" @@ -2024,14 +2429,65 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" +[[package]] +name = "raft" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f12688b23a649902762d4c11d854d73c49c9b93138f2de16403ef9f571ad5bae" +dependencies = [ + "bytes", + "fxhash", + "getset", + "protobuf", + "raft-proto", + "rand 0.8.5", + "slog", + "slog-envlogger", + "slog-stdlog", + "slog-term", + "thiserror 1.0.69", +] + +[[package]] +name = "raft-proto" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb6884896294f553e8d5cfbdb55080b9f5f2f43394afff59c9f077e0f4b46d6b" +dependencies = [ + "bytes", + "protobuf", + "protobuf-build", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + 
[[package]] name = "rand" version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ - "rand_chacha", - "rand_core", + "rand_chacha 0.9.0", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", ] [[package]] @@ -2041,7 +2497,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.16", ] [[package]] @@ -2053,19 +2518,28 @@ dependencies = [ "getrandom 0.3.2", ] +[[package]] +name = "rand_xorshift" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" +dependencies = [ + "rand_core 0.9.3", +] + [[package]] name = "ratatui" version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabd94c2f37801c20583fc49dd5cd6b0ba68c716787c2dd6ed18571e1e63117b" dependencies = [ - "bitflags", + "bitflags 2.9.0", "cassowary", "compact_str", "crossterm", "indoc", "instability", - "itertools", + "itertools 0.13.0", "lru", "paste", "strum 0.26.3", @@ -2100,7 +2574,7 @@ version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" dependencies = [ - "bitflags", + "bitflags 2.9.0", ] [[package]] @@ -2208,7 +2682,7 @@ 
dependencies = [ "cfg-if", "cheetah-string", "clap", - "criterion", + "criterion 0.7.0", "crossbeam-skiplist", "dashmap", "dirs", @@ -2218,7 +2692,7 @@ dependencies = [ "mockall", "num_cpus", "parking_lot", - "rand", + "rand 0.9.2", "rocketmq-client-rust", "rocketmq-common", "rocketmq-error", @@ -2260,7 +2734,7 @@ dependencies = [ "num_cpus", "once_cell", "parking_lot", - "rand", + "rand 0.9.2", "rocketmq-common", "rocketmq-error", "rocketmq-remoting", @@ -2285,7 +2759,7 @@ dependencies = [ "chrono", "config", "crc32fast", - "criterion", + "criterion 0.7.0", "dashmap", "dirs", "flate2", @@ -2317,6 +2791,43 @@ dependencies = [ "zstd", ] +[[package]] +name = "rocketmq-controller" +version = "0.7.0" +dependencies = [ + "anyhow", + "async-trait", + "bincode", + "bytes", + "chrono", + "criterion 0.5.1", + "dashmap", + "env_logger", + "futures", + "parking_lot", + "proptest", + "prost", + "prost-types", + "protobuf", + "raft", + "raft-proto", + "rocketmq-common", + "rocketmq-remoting", + "rocketmq-runtime", + "rocketmq-rust", + "rocketmq-store", + "rocksdb", + "serde", + "serde_json", + "slog", + "tempfile", + "thiserror 2.0.17", + "tokio", + "tokio-util", + "tracing", + "tracing-subscriber", +] + [[package]] name = "rocketmq-error" version = "0.7.0" @@ -2354,7 +2865,7 @@ name = "rocketmq-macros" version = "0.7.0" dependencies = [ "cheetah-string", - "criterion", + "criterion 0.7.0", "proc-macro2", "quote", "rocketmq-error", @@ -2394,7 +2905,7 @@ dependencies = [ "bytemuck", "bytes", "cheetah-string", - "criterion", + "criterion 0.7.0", "dashmap", "flate2", "flume", @@ -2403,7 +2914,7 @@ dependencies = [ "lazy_static", "num_cpus", "parking_lot", - "rand", + "rand 0.9.2", "rocketmq-common", "rocketmq-error", "rocketmq-macros", @@ -2453,7 +2964,7 @@ dependencies = [ "anyhow", "bytes", "cheetah-string", - "criterion", + "criterion 0.7.0", "dashmap", "dirs", "futures-util", @@ -2464,7 +2975,7 @@ dependencies = [ "once_cell", "page_size", "parking_lot", - "rand", + "rand 
0.9.2", "rocketmq-common", "rocketmq-error", "rocketmq-remoting", @@ -2514,6 +3025,16 @@ dependencies = [ "tokio-stream", ] +[[package]] +name = "rocksdb" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bd13e55d6d7b8cd0ea569161127567cd587676c99f4472f779a0279aa60a7a7" +dependencies = [ + "libc", + "librocksdb-sys", +] + [[package]] name = "ron" version = "0.8.1" @@ -2521,7 +3042,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" dependencies = [ "base64 0.21.7", - "bitflags", + "bitflags 2.9.0", "serde", "serde_derive", ] @@ -2536,13 +3057,25 @@ dependencies = [ "ordered-multimap", ] +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + [[package]] name = "rustix" version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags", + "bitflags 2.9.0", "errno", "libc", "linux-raw-sys 0.4.15", @@ -2555,7 +3088,7 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" dependencies = [ - "bitflags", + "bitflags 2.9.0", "errno", "libc", "linux-raw-sys 0.9.4", @@ -2598,6 +3131,18 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" +[[package]] +name = "rusty-fork" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "cc6bf79ff24e648f6da1f8d1f011e9cac26491b619e6b9280f2b47f1774e6ee2" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + [[package]] name = "ryu" version = "1.0.20" @@ -2634,7 +3179,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags", + "bitflags 2.9.0", "core-foundation", "core-foundation-sys", "libc", @@ -2667,7 +3212,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9faf48a4a2d2693be24c6289dbe26552776eb7737074e6722891fadbe6c5058" dependencies = [ - "erased-serde", + "erased-serde 0.4.6", "serde", "serde_core", "typeid", @@ -2808,6 +3353,81 @@ dependencies = [ "autocfg", ] +[[package]] +name = "slog" +version = "2.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b3b8565691b22d2bdfc066426ed48f837fc0c5f2c8cad8d9718f7f99d6995c1" +dependencies = [ + "anyhow", + "erased-serde 0.3.31", + "rustversion", + "serde_core", +] + +[[package]] +name = "slog-async" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72c8038f898a2c79507940990f05386455b3a317d8f18d4caea7cbc3d5096b84" +dependencies = [ + "crossbeam-channel", + "slog", + "take_mut", + "thread_local", +] + +[[package]] +name = "slog-envlogger" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "906a1a0bc43fed692df4b82a5e2fbfc3733db8dad8bb514ab27a4f23ad04f5c0" +dependencies = [ + "log", + "regex", + "slog", + "slog-async", + "slog-scope", + "slog-stdlog", + "slog-term", +] + +[[package]] +name = "slog-scope" +version = "4.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f95a4b4c3274cd2869549da82b57ccc930859bdbf5bcea0424bc5f140b3c786" +dependencies = [ + "arc-swap", + "lazy_static", + "slog", +] + +[[package]] +name = 
"slog-stdlog" +version = "4.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6706b2ace5bbae7291d3f8d2473e2bfab073ccd7d03670946197aec98471fa3e" +dependencies = [ + "log", + "slog", + "slog-scope", +] + +[[package]] +name = "slog-term" +version = "2.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cb1fc680b38eed6fad4c02b3871c09d2c81db8c96aa4e9c0a34904c830f09b5" +dependencies = [ + "chrono", + "is-terminal", + "slog", + "term", + "thread_local", + "time", +] + [[package]] name = "smallvec" version = "1.15.0" @@ -2972,7 +3592,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags", + "bitflags 2.9.0", "core-foundation", "system-configuration-sys", ] @@ -3011,6 +3631,12 @@ dependencies = [ "syn 2.0.108", ] +[[package]] +name = "take_mut" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60" + [[package]] name = "tap" version = "1.0.1" @@ -3030,6 +3656,15 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "term" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2111ef44dae28680ae9752bb89409e7310ca33a8c621ebe7b106cf5c928b3ac0" +dependencies = [ + "windows-sys 0.61.2", +] + [[package]] name = "termtree" version = "0.5.1" @@ -3297,7 +3932,7 @@ version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cc2d9e086a412a451384326f521c8123a99a466b329941a9403696bff9b0da2" dependencies = [ - "bitflags", + "bitflags 2.9.0", "bytes", "futures-util", "http", @@ -3425,6 +4060,12 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" +[[package]] +name = "unarray" 
+version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "unicode-ident" version = "1.0.18" @@ -3443,7 +4084,7 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3644627a5af5fa321c95b9b235a72fd24cd29c648c2c379431e6628655627bf" dependencies = [ - "itertools", + "itertools 0.13.0", "unicode-segmentation", "unicode-width 0.1.14", ] @@ -3504,7 +4145,7 @@ checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ "getrandom 0.3.2", "js-sys", - "rand", + "rand 0.9.2", "uuid-macro-internal", "wasm-bindgen", ] @@ -3538,6 +4179,15 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +[[package]] +name = "wait-timeout" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" +dependencies = [ + "libc", +] + [[package]] name = "walkdir" version = "2.5.0" @@ -4148,7 +4798,7 @@ version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ - "bitflags", + "bitflags 2.9.0", ] [[package]] @@ -4300,6 +4950,7 @@ version = "2.0.15+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" dependencies = [ + "bindgen 0.71.1", "cc", "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index 1ca46d2d..c926fc40 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,6 +5,7 @@ members = [ "rocketmq-cli", "rocketmq-client", "rocketmq-common", + "rocketmq-controller", "rocketmq-error", "rocketmq-example", "rocketmq-filter", diff --git a/rocketmq-controller/Cargo.toml 
b/rocketmq-controller/Cargo.toml new file mode 100644 index 00000000..ba707fc0 --- /dev/null +++ b/rocketmq-controller/Cargo.toml @@ -0,0 +1,79 @@ +[package] +name = "rocketmq-controller" +version.workspace = true +authors.workspace = true +edition.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +readme = "README.md" +description = "RocketMQ Controller Module - High Availability Raft-based Controller" +keywords = ["rocketmq", "controller", "raft", "distributed", "messaging"] +categories = ["network-programming", "asynchronous"] + +[dependencies] +# Core async runtime +tokio = { workspace = true } +tokio-util = { workspace = true } + +# Raft consensus algorithm +raft = "0.7" +raft-proto = "0.7" +prost = "0.13" +protobuf = "2.28" +slog = { version = "2.7", features = ["max_level_trace", "release_max_level_info"] } + +# Serialization +serde = { workspace = true } +serde_json = { workspace = true } +bincode = "1.3" + +# Networking and protocols +bytes = { workspace = true } +prost-types = "0.13" +futures = "0.3" + +# Concurrent data structures +dashmap = { workspace = true } +parking_lot = { workspace = true } + +# Error handling +thiserror = { workspace = true } +anyhow = { workspace = true } + +# Logging and tracing +tracing = { workspace = true } +tracing-subscriber = { workspace = true } + +# Time utilities +chrono = "0.4" + +# Async trait support +async-trait = "0.1" + +# Storage backends +rocksdb = { version = "0.22", optional = true } + +# Internal dependencies (rocketmq-rust modules) +rocketmq-common = { workspace = true } +rocketmq-remoting = { workspace = true } +rocketmq-runtime = { workspace = true } +rocketmq-store = { workspace = true } +rocketmq-rust = { workspace = true } + +[dev-dependencies] +criterion = { version = "0.5", features = ["async_tokio"] } +tempfile = "3.10" +env_logger = "0.11" +proptest = "1.4" + +[[bench]] +name = "controller_bench" +harness = false + +[features] +default = 
["storage-file"] +storage-rocksdb = ["rocksdb"] +storage-file = [] +metrics = [] +debug = [] diff --git a/rocketmq-controller/README.md b/rocketmq-controller/README.md new file mode 100644 index 00000000..5fac9711 --- /dev/null +++ b/rocketmq-controller/README.md @@ -0,0 +1,148 @@ +# RocketMQ Controller + +RocketMQ Controller Module - High Availability Controller based on Raft + +## Introduction + +RocketMQ Controller is the core management component of RocketMQ cluster, responsible for: + +- **Cluster Metadata Management**: Broker registration, Topic configuration, cluster configuration, etc. +- **High Availability**: Master-slave failover based on Raft consensus algorithm +- **Leader Election**: Automatic leader node election and failover +- **Data Consistency**: Ensures strong data consistency through Raft log replication + +## Architecture + +``` +┌──────────────────────────────────────────┐ +│ Controller Manager │ +├──────────────────────────────────────────┤ +│ │ +│ ┌────────────┐ ┌────────────────────┐ │ +│ │ Raft │ │ Metadata Store │ │ +│ │ Controller │ │ │ │ +│ │ │ │ - Broker Manager │ │ +│ │ - Election │ │ - Topic Manager │ │ +│ │ - Replica │ │ - Config Manager │ │ +│ └────────────┘ └────────────────────┘ │ +│ │ +│ ┌────────────────────────────────────┐ │ +│ │ Processor Manager │ │ +│ │ │ │ +│ │ - Register Broker │ │ +│ │ - Heartbeat │ │ +│ │ - Create/Update Topic │ │ +│ │ - Query Metadata │ │ +│ └────────────────────────────────────┘ │ +└──────────────────────────────────────────┘ +``` + +## Features + +### ✅ Implemented +- Basic project structure +- Configuration management (ControllerConfig) +- Error handling (ControllerError) +- Raft controller framework +- Metadata storage (Broker, Topic, Config) +- Processor manager framework + +### 🚧 In Progress +- Complete Raft node implementation +- Network communication layer +- RPC processor implementation + +### 📋 Planned +- Persistent storage (RocksDB/custom logging) +- Snapshot management +- Complete 
integration tests +- Performance benchmarks +- Monitoring metrics + +## Quick Start + +### Basic Usage + +```rust +use rocketmq_controller::*; + +#[tokio::main] +async fn main() -> Result<()> { + // Create configuration + let config = ControllerConfig::new( + 1, // node_id + "127.0.0.1:9876".parse().unwrap() + ) + .with_raft_peers(vec![ + RaftPeer { id: 1, addr: "127.0.0.1:9876".parse().unwrap() }, + RaftPeer { id: 2, addr: "127.0.0.1:9877".parse().unwrap() }, + RaftPeer { id: 3, addr: "127.0.0.1:9878".parse().unwrap() }, + ]) + .with_storage_path("/data/controller".into()); + + // Create and start Controller + let manager = ControllerManager::new(config).await?; + manager.start().await?; + + // Wait... + + // Graceful shutdown + manager.shutdown().await?; + Ok(()) +} +``` + +## Dependencies + +Main dependencies: + +- `raft-rs` - Raft consensus algorithm implementation +- `tokio` - Async runtime +- `dashmap` - Concurrent hash map +- `serde` - Serialization/deserialization +- `tracing` - Logging and tracing + +## Development + +### Build + +```bash +cargo build -p rocketmq-controller +``` + +### Testing + +```bash +cargo test -p rocketmq-controller +``` + +### Benchmark + +```bash +cargo bench -p rocketmq-controller +``` + +## Comparison with Java Version + +| Feature | Java (DLedger) | Rust (raft-rs) | +|---------|---------------|----------------| +| Consensus Algorithm | DLedger | raft-rs | +| Async Model | Netty | Tokio | +| Concurrency Control | ConcurrentHashMap | DashMap | +| Error Handling | Exceptions | Result | +| Type Safety | Runtime | Compile-time | + +## Performance Goals + +- Leader election latency: < 500ms +- Heartbeat throughput: > 10,000 ops/s +- Metadata write latency: < 10ms (p99) +- Metadata read latency: < 1ms (p99) + +## Contributing + +Contributions are welcome! Please see [CONTRIBUTING.md](../CONTRIBUTING.md). + +## License + +Licensed under Apache License 2.0 or MIT license, at your option. 
diff --git a/rocketmq-controller/benches/controller_bench.rs b/rocketmq-controller/benches/controller_bench.rs new file mode 100644 index 00000000..907722f6 --- /dev/null +++ b/rocketmq-controller/benches/controller_bench.rs @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use criterion::criterion_group; +use criterion::criterion_main; +use criterion::Criterion; + +/// Controller benchmark placeholder +/// +/// TODO: Implement actual benchmarks: +/// - Leader election latency +/// - Heartbeat processing throughput +/// - Metadata operation latency +/// - Raft log append performance +fn controller_bench(c: &mut Criterion) { + c.bench_function("placeholder", |b| { + b.iter(|| { + // Placeholder benchmark + 1 + 1 + }) + }); +} + +criterion_group!(benches, controller_bench); +criterion_main!(benches); diff --git a/rocketmq-controller/src/config.rs b/rocketmq-controller/src/config.rs new file mode 100644 index 00000000..3caf7e3b --- /dev/null +++ b/rocketmq-controller/src/config.rs @@ -0,0 +1,210 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::net::SocketAddr; +use std::path::PathBuf; + +use serde::Deserialize; +use serde::Serialize; + +use crate::error::ControllerError; +use crate::error::Result; +use crate::storage::StorageConfig; + +/// Raft peer configuration +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RaftPeer { + /// Node ID + pub id: u64, + + /// Peer address + pub addr: SocketAddr, +} + +/// Storage backend type +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub enum StorageBackendType { + /// RocksDB storage + RocksDB, + + /// File-based storage + File, + + /// In-memory storage (for testing) + Memory, +} + +/// Controller configuration +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ControllerConfig { + /// Node ID + pub node_id: u64, + + /// Listen address + pub listen_addr: SocketAddr, + + /// Raft peer list + pub raft_peers: Vec, + + /// Storage path + pub storage_path: PathBuf, + + /// Storage backend type + pub storage_backend: StorageBackendType, + + /// Election timeout in milliseconds + pub election_timeout_ms: u64, + + /// Heartbeat interval in milliseconds + pub heartbeat_interval_ms: u64, +} + +impl ControllerConfig { + /// Create a new controller configuration + pub fn new(node_id: u64, listen_addr: SocketAddr) -> Self { + Self { + 
node_id, + listen_addr, + raft_peers: Vec::new(), + storage_path: PathBuf::from("/tmp/rocketmq-controller"), + storage_backend: StorageBackendType::RocksDB, + election_timeout_ms: 1000, + heartbeat_interval_ms: 300, + } + } + + /// Set Raft peers + pub fn with_raft_peers(mut self, peers: Vec) -> Self { + self.raft_peers = peers; + self + } + + /// Set storage path + pub fn with_storage_path(mut self, path: PathBuf) -> Self { + self.storage_path = path; + self + } + + /// Set storage backend + pub fn with_storage_backend(mut self, backend: StorageBackendType) -> Self { + self.storage_backend = backend; + self + } + + /// Set election timeout + pub fn with_election_timeout_ms(mut self, timeout_ms: u64) -> Self { + self.election_timeout_ms = timeout_ms; + self + } + + /// Set heartbeat interval + pub fn with_heartbeat_interval_ms(mut self, interval_ms: u64) -> Self { + self.heartbeat_interval_ms = interval_ms; + self + } + + /// Validate the configuration + pub fn validate(&self) -> Result<()> { + if self.node_id == 0 { + return Err(ControllerError::ConfigError( + "Node ID cannot be 0".to_string(), + )); + } + + if self.election_timeout_ms == 0 { + return Err(ControllerError::ConfigError( + "Election timeout cannot be 0".to_string(), + )); + } + + if self.heartbeat_interval_ms == 0 { + return Err(ControllerError::ConfigError( + "Heartbeat interval cannot be 0".to_string(), + )); + } + + if self.heartbeat_interval_ms >= self.election_timeout_ms { + return Err(ControllerError::ConfigError( + "Heartbeat interval must be less than election timeout".to_string(), + )); + } + + Ok(()) + } + + /// Convert to storage configuration + pub fn to_storage_config(&self) -> StorageConfig { + match self.storage_backend { + #[cfg(feature = "storage-rocksdb")] + StorageBackendType::RocksDB => StorageConfig::RocksDB { + path: self.storage_path.join("rocksdb"), + }, + + #[cfg(feature = "storage-file")] + StorageBackendType::File => StorageConfig::File { + path: 
self.storage_path.join("filedb"), + }, + + StorageBackendType::Memory => StorageConfig::Memory, + + #[allow(unreachable_patterns)] + _ => StorageConfig::Memory, + } + } + + /// Create a test configuration (for testing only) + #[cfg(test)] + pub fn test_config() -> Self { + Self { + node_id: 1, + listen_addr: "127.0.0.1:29876".parse().unwrap(), + raft_peers: vec![RaftPeer { + id: 1, + addr: "127.0.0.1:29876".parse().unwrap(), + }], + storage_path: std::path::PathBuf::from("/tmp/controller_test"), + storage_backend: StorageBackendType::Memory, + election_timeout_ms: 1000, + heartbeat_interval_ms: 300, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_config_builder() { + let config = ControllerConfig::new(1, "127.0.0.1:9876".parse().unwrap()) + .with_election_timeout_ms(2000) + .with_heartbeat_interval_ms(600); + + assert_eq!(config.node_id, 1); + assert_eq!(config.election_timeout_ms, 2000); + assert_eq!(config.heartbeat_interval_ms, 600); + } + + #[test] + fn test_config_validation() { + let config = ControllerConfig::new(1, "127.0.0.1:9876".parse().unwrap()); + assert!(config.validate().is_ok()); + + let invalid_config = ControllerConfig::new(0, "127.0.0.1:9876".parse().unwrap()); + assert!(invalid_config.validate().is_err()); + } +} diff --git a/rocketmq-controller/src/error.rs b/rocketmq-controller/src/error.rs new file mode 100644 index 00000000..06f97e1a --- /dev/null +++ b/rocketmq-controller/src/error.rs @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::io; + +use thiserror::Error; + +/// Controller module error types +#[derive(Debug, Error)] +pub enum ControllerError { + /// IO errors + #[error("IO error: {0}")] + Io(#[from] io::Error), + + /// Raft consensus errors + #[error("Raft error: {0}")] + Raft(String), + + /// Not the leader error + #[error("Not leader, current leader is: {}", leader_id.map(|id| id.to_string()).unwrap_or_else(|| "unknown".to_string()))] + NotLeader { leader_id: Option }, + + /// Metadata not found + #[error("Metadata not found: {key}")] + MetadataNotFound { key: String }, + + /// Invalid request + #[error("Invalid request: {0}")] + InvalidRequest(String), + + /// Broker registration error + #[error("Broker registration failed: {0}")] + BrokerRegistrationFailed(String), + + /// Configuration error + #[error("Configuration error: {0}")] + ConfigError(String), + + /// Serialization error + #[error("Serialization error: {0}")] + SerializationError(String), + + /// Storage error + #[error("Storage error: {0}")] + StorageError(String), + + /// Network error + #[error("Network error: {0}")] + NetworkError(String), + + /// Timeout error + #[error("Operation timeout after {timeout_ms}ms")] + Timeout { timeout_ms: u64 }, + + /// Internal error + #[error("Internal error: {0}")] + Internal(String), + + /// Shutdown error + #[error("Controller is shutting down")] + Shutdown, +} + +impl From for ControllerError { + fn from(e: serde_json::Error) -> Self { + ControllerError::SerializationError(e.to_string()) + } +} + +impl From for ControllerError { + fn from(e: 
bincode::Error) -> Self { + ControllerError::SerializationError(e.to_string()) + } +} + +impl From for ControllerError { + fn from(e: raft::Error) -> Self { + ControllerError::Raft(format!("{:?}", e)) + } +} + +/// Result type alias for Controller operations +pub type Result = std::result::Result; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_error_display() { + let err = ControllerError::NotLeader { leader_id: Some(1) }; + assert!(err.to_string().contains("Not leader")); + } + + #[test] + fn test_error_conversion() { + let io_err = io::Error::new(io::ErrorKind::Other, "test"); + let controller_err: ControllerError = io_err.into(); + assert!(matches!(controller_err, ControllerError::Io(_))); + } +} diff --git a/rocketmq-controller/src/lib.rs b/rocketmq-controller/src/lib.rs new file mode 100644 index 00000000..141abea6 --- /dev/null +++ b/rocketmq-controller/src/lib.rs @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! # RocketMQ Controller Module +//! +//! High-availability controller implementation for RocketMQ, providing: +//! - Raft-based consensus for leader election and metadata replication +//! - Broker registration and heartbeat management +//! 
- Topic metadata management and synchronization +//! - Configuration management across the cluster +//! +//! ## Architecture +//! +//! ```text +//! ┌─────────────────────────────────────────┐ +//! │ Controller Manager (Entry) │ +//! └──────────────┬──────────────────────────┘ +//! │ +//! ┌───────┴────────┐ +//! │ │ +//! ┌──────▼──────┐ ┌─────▼──────┐ +//! │ Raft Module │ │ Processor │ +//! │ (raft-rs) │ │ Layer │ +//! └──────┬──────┘ └─────┬──────┘ +//! │ │ +//! └────────┬───────┘ +//! │ +//! ┌────────▼─────────┐ +//! │ Metadata Store │ +//! │ (DashMap/Raft) │ +//! └──────────────────┘ +//! ``` +//! +//! ## Usage +//! +//! ```rust,ignore +//! use rocketmq_controller::ControllerConfig; +//! use rocketmq_controller::ControllerManager; +//! +//! #[tokio::main] +//! async fn main() -> anyhow::Result<()> { +//! let config = ControllerConfig::default(); +//! let mut controller = ControllerManager::new(config)?; +//! +//! controller.start().await?; +//! +//! // Controller is now running... +//! +//! controller.shutdown().await?; +//! Ok(()) +//! } +//! 
``` + +#![warn(rust_2018_idioms)] +#![warn(clippy::all)] +#![allow(dead_code)] +#![allow(clippy::module_inception)] + +pub mod config; +pub mod error; +pub mod manager; +pub mod metadata; +pub mod processor; +pub mod raft; +pub mod rpc; +pub mod storage; + +pub use config::ControllerConfig; +pub use error::ControllerError; +pub use error::Result; +pub use manager::ControllerManager; + +/// Controller module version +pub const VERSION: &str = env!("CARGO_PKG_VERSION"); + +/// Default controller listen port +pub const DEFAULT_CONTROLLER_PORT: u16 = 9878; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_version() { + assert!(!VERSION.is_empty()); + } +} diff --git a/rocketmq-controller/src/manager.rs b/rocketmq-controller/src/manager.rs new file mode 100644 index 00000000..5eb4b0fd --- /dev/null +++ b/rocketmq-controller/src/manager.rs @@ -0,0 +1,217 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use std::sync::Arc; + +use tokio::sync::RwLock; +use tracing::error; +use tracing::info; +use tracing::warn; + +use crate::config::ControllerConfig; +use crate::error::Result; +use crate::metadata::MetadataStore; +use crate::processor::ProcessorManager; +use crate::raft::RaftController; +use crate::rpc::RpcServer; + +/// Main controller manager +/// +/// This is the central component that coordinates all controller operations. +/// It manages: +/// - Raft consensus layer +/// - Metadata storage +/// - Request processing +/// - RPC server +/// - Lifecycle management +pub struct ControllerManager { + /// Configuration + config: Arc, + + /// Raft controller + raft: Arc, + + /// Metadata store + metadata: Arc, + + /// Request processor + processor: Arc, + + /// RPC server + rpc_server: Arc, + + /// Running state + running: Arc>, +} + +impl ControllerManager { + /// Create a new controller manager + pub async fn new(config: ControllerConfig) -> Result { + let config = Arc::new(config); + + info!("Initializing controller manager with config: {:?}", config); + + // Initialize Raft controller + let raft = Arc::new(RaftController::new(config.clone()).await?); + + // Initialize metadata store + let metadata = Arc::new(MetadataStore::new(config.clone()).await?); + + // Initialize processor manager + let processor = Arc::new(ProcessorManager::new( + config.clone(), + raft.clone(), + metadata.clone(), + )); + + // Initialize RPC server + let rpc_server = Arc::new(RpcServer::new(config.listen_addr, processor.clone())); + + Ok(Self { + config, + raft, + metadata, + processor, + rpc_server, + running: Arc::new(RwLock::new(false)), + }) + } + + /// Start the controller + pub async fn start(&self) -> Result<()> { + let mut running = self.running.write().await; + if *running { + warn!("Controller is already running"); + return Ok(()); + } + + info!("Starting controller manager..."); + + // Start Raft controller + self.raft.start().await?; + + // Start metadata store + 
self.metadata.start().await?; + + // Start processor manager + self.processor.start().await?; + + // Start RPC server + self.rpc_server.start().await?; + + *running = true; + info!("Controller manager started successfully"); + + Ok(()) + } + + /// Shutdown the controller + pub async fn shutdown(&self) -> Result<()> { + let mut running = self.running.write().await; + if !*running { + warn!("Controller is not running"); + return Ok(()); + } + + info!("Shutting down controller manager..."); + + // Shutdown RPC server first to stop accepting requests + if let Err(e) = self.rpc_server.shutdown().await { + error!("Failed to shutdown RPC server: {}", e); + } + + // Shutdown processor + if let Err(e) = self.processor.shutdown().await { + error!("Failed to shutdown processor: {}", e); + } + + // Shutdown metadata store + if let Err(e) = self.metadata.shutdown().await { + error!("Failed to shutdown metadata store: {}", e); + } + + // Shutdown Raft last + if let Err(e) = self.raft.shutdown().await { + error!("Failed to shutdown Raft controller: {}", e); + } + + *running = false; + info!("Controller manager shut down successfully"); + + Ok(()) + } + + /// Check if this node is the leader + pub async fn is_leader(&self) -> bool { + self.raft.is_leader().await + } + + /// Get the current leader ID + pub async fn get_leader(&self) -> Option { + self.raft.get_leader().await + } + + /// Check if the controller is running + pub async fn is_running(&self) -> bool { + *self.running.read().await + } + + /// Get the Raft controller + pub fn raft(&self) -> &Arc { + &self.raft + } + + /// Get the metadata store + pub fn metadata(&self) -> &Arc { + &self.metadata + } + + /// Get the processor manager + pub fn processor(&self) -> &Arc { + &self.processor + } +} + +impl Drop for ControllerManager { + fn drop(&mut self) { + // Best effort shutdown on drop + let running = self.running.clone(); + let processor = self.processor.clone(); + let metadata = self.metadata.clone(); + let raft = 
self.raft.clone(); + + tokio::spawn(async move { + let is_running = *running.read().await; + if is_running { + warn!("Controller manager dropped while running, performing emergency shutdown"); + let _ = processor.shutdown().await; + let _ = metadata.shutdown().await; + let _ = raft.shutdown().await; + } + }); + } +} + +#[cfg(test)] +mod tests { + #[tokio::test] + async fn test_manager_lifecycle() { + // This is a placeholder test + // Real tests will be added after implementing the dependencies + assert!(true); + } +} diff --git a/rocketmq-controller/src/metadata/broker.rs b/rocketmq-controller/src/metadata/broker.rs new file mode 100644 index 00000000..86e499ea --- /dev/null +++ b/rocketmq-controller/src/metadata/broker.rs @@ -0,0 +1,238 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use std::net::SocketAddr; +use std::sync::Arc; +use std::time::Duration; +use std::time::SystemTime; + +use dashmap::DashMap; +use serde::Deserialize; +use serde::Serialize; +use tokio::time; +use tracing::debug; +use tracing::info; +use tracing::warn; + +use crate::config::ControllerConfig; +use crate::error::ControllerError; +use crate::error::Result; + +/// Broker information +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BrokerInfo { + /// Broker name + pub name: String, + + /// Broker ID + pub broker_id: u64, + + /// Cluster name + pub cluster_name: String, + + /// Broker address + pub addr: SocketAddr, + + /// Last heartbeat time + pub last_heartbeat: SystemTime, + + /// Broker version + pub version: String, + + /// Broker role (MASTER, SLAVE) + pub role: BrokerRole, + + /// Additional metadata + pub metadata: serde_json::Value, +} + +/// Broker role +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub enum BrokerRole { + Master, + Slave, +} + +/// Broker manager +pub struct BrokerManager { + /// Registered brokers: broker_name -> BrokerInfo + brokers: Arc>, + + /// Configuration + config: Arc, + + /// Heartbeat timeout duration + heartbeat_timeout: Duration, +} + +impl BrokerManager { + /// Create a new broker manager + pub fn new(config: Arc) -> Self { + Self { + brokers: Arc::new(DashMap::new()), + config, + heartbeat_timeout: Duration::from_secs(30), + } + } + + /// Start the broker manager + pub async fn start(&self) -> Result<()> { + info!("Starting broker manager"); + + // Start heartbeat checker + let brokers = self.brokers.clone(); + let timeout = self.heartbeat_timeout; + tokio::spawn(async move { + let mut interval = time::interval(Duration::from_secs(5)); + loop { + interval.tick().await; + Self::check_heartbeats(&brokers, timeout); + } + }); + + Ok(()) + } + + /// Shutdown the broker manager + pub async fn shutdown(&self) -> Result<()> { + info!("Shutting down broker manager"); + self.brokers.clear(); + 
Ok(()) + } + + /// Register a broker + pub async fn register(&self, info: BrokerInfo) -> Result<()> { + info!("Registering broker: {} ({})", info.name, info.addr); + + // Validate broker info + if info.name.is_empty() { + return Err(ControllerError::InvalidRequest( + "Broker name cannot be empty".to_string(), + )); + } + + // Update broker info + self.brokers.insert(info.name.clone(), info); + + Ok(()) + } + + /// Unregister a broker + pub async fn unregister(&self, broker_name: &str) -> Result<()> { + info!("Unregistering broker: {}", broker_name); + + self.brokers + .remove(broker_name) + .ok_or_else(|| ControllerError::MetadataNotFound { + key: broker_name.to_string(), + })?; + + Ok(()) + } + + /// Update broker heartbeat + pub async fn heartbeat(&self, broker_name: &str) -> Result<()> { + debug!("Heartbeat from broker: {}", broker_name); + + let mut broker = + self.brokers + .get_mut(broker_name) + .ok_or_else(|| ControllerError::MetadataNotFound { + key: broker_name.to_string(), + })?; + + broker.last_heartbeat = SystemTime::now(); + + Ok(()) + } + + /// Get broker information + pub async fn get_broker(&self, broker_name: &str) -> Result { + self.brokers + .get(broker_name) + .map(|entry| entry.value().clone()) + .ok_or_else(|| ControllerError::MetadataNotFound { + key: broker_name.to_string(), + }) + } + + /// List all brokers + pub async fn list_brokers(&self) -> Vec { + self.brokers + .iter() + .map(|entry| entry.value().clone()) + .collect() + } + + /// List brokers by cluster + pub async fn list_brokers_by_cluster(&self, cluster_name: &str) -> Vec { + self.brokers + .iter() + .filter(|entry| entry.value().cluster_name == cluster_name) + .map(|entry| entry.value().clone()) + .collect() + } + + /// Check heartbeats and remove stale brokers + fn check_heartbeats(brokers: &DashMap, timeout: Duration) { + let now = SystemTime::now(); + let mut to_remove = Vec::new(); + + for entry in brokers.iter() { + let broker = entry.value(); + if let Ok(elapsed) = 
now.duration_since(broker.last_heartbeat) { + if elapsed > timeout { + warn!( + "Broker {} heartbeat timeout, removing (last: {:?})", + broker.name, elapsed + ); + to_remove.push(broker.name.clone()); + } + } + } + + for name in to_remove { + brokers.remove(&name); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_broker_registration() { + let config = Arc::new(ControllerConfig::test_config()); + + let manager = BrokerManager::new(config); + + let info = BrokerInfo { + name: "broker-a".to_string(), + broker_id: 0, + cluster_name: "DefaultCluster".to_string(), + addr: "127.0.0.1:10911".parse().unwrap(), + last_heartbeat: SystemTime::now(), + version: "5.0.0".to_string(), + role: BrokerRole::Master, + metadata: serde_json::json!({}), + }; + + assert!(manager.register(info.clone()).await.is_ok()); + assert!(manager.get_broker("broker-a").await.is_ok()); + } +} diff --git a/rocketmq-controller/src/metadata/config.rs b/rocketmq-controller/src/metadata/config.rs new file mode 100644 index 00000000..cc84ce16 --- /dev/null +++ b/rocketmq-controller/src/metadata/config.rs @@ -0,0 +1,139 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use std::sync::Arc; + +use dashmap::DashMap; +use serde::Deserialize; +use serde::Serialize; +use tracing::info; + +use crate::config::ControllerConfig; +use crate::error::ControllerError; +use crate::error::Result; + +/// Configuration information +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ConfigInfo { + /// Config key + pub key: String, + + /// Config value + pub value: String, + + /// Config description + pub description: Option, +} + +/// Configuration manager +pub struct ConfigManager { + /// Configurations: key -> ConfigInfo + configs: Arc>, + + /// Controller configuration + #[allow(dead_code)] + config: Arc, +} + +impl ConfigManager { + /// Create a new config manager + pub fn new(config: Arc) -> Self { + Self { + configs: Arc::new(DashMap::new()), + config, + } + } + + /// Start the config manager + pub async fn start(&self) -> Result<()> { + info!("Starting config manager"); + Ok(()) + } + + /// Shutdown the config manager + pub async fn shutdown(&self) -> Result<()> { + info!("Shutting down config manager"); + self.configs.clear(); + Ok(()) + } + + /// Set a configuration + pub async fn set_config(&self, info: ConfigInfo) -> Result<()> { + info!("Setting config: {} = {}", info.key, info.value); + + if info.key.is_empty() { + return Err(ControllerError::InvalidRequest( + "Config key cannot be empty".to_string(), + )); + } + + self.configs.insert(info.key.clone(), info); + Ok(()) + } + + /// Get a configuration + pub async fn get_config(&self, key: &str) -> Result { + self.configs + .get(key) + .map(|entry| entry.value().clone()) + .ok_or_else(|| ControllerError::MetadataNotFound { + key: key.to_string(), + }) + } + + /// Delete a configuration + pub async fn delete_config(&self, key: &str) -> Result<()> { + info!("Deleting config: {}", key); + + self.configs + .remove(key) + .ok_or_else(|| ControllerError::MetadataNotFound { + key: key.to_string(), + })?; + + Ok(()) + } + + /// List all configurations + pub async fn 
list_configs(&self) -> Vec { + self.configs + .iter() + .map(|entry| entry.value().clone()) + .collect() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_config_management() { + let config = Arc::new(ControllerConfig::test_config()); + + let manager = ConfigManager::new(config); + + let info = ConfigInfo { + key: "test.key".to_string(), + value: "test.value".to_string(), + description: Some("Test configuration".to_string()), + }; + + assert!(manager.set_config(info.clone()).await.is_ok()); + assert!(manager.get_config("test.key").await.is_ok()); + } +} diff --git a/rocketmq-controller/src/metadata/mod.rs b/rocketmq-controller/src/metadata/mod.rs new file mode 100644 index 00000000..3c61e8b3 --- /dev/null +++ b/rocketmq-controller/src/metadata/mod.rs @@ -0,0 +1,133 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +mod broker; +mod config; +mod replica; +mod topic; + +use std::sync::Arc; + +pub use broker::BrokerInfo; +pub use broker::BrokerManager; +pub use broker::BrokerRole; +pub use config::ConfigInfo; +pub use config::ConfigManager; +pub use replica::BrokerReplicaInfo; +pub use replica::ReplicaRole; +pub use replica::ReplicasManager; +pub use replica::SyncStateSet; +pub use topic::TopicConfig; +pub use topic::TopicInfo; +pub use topic::TopicManager; +use tracing::info; + +use crate::config::ControllerConfig; +use crate::error::Result; + +/// Metadata store +/// +/// This component manages all metadata for the controller: +/// - Broker registration and heartbeat +/// - Topic configuration +/// - Controller configuration +/// - Replica and ISR management +/// +/// All metadata is replicated through Raft for consistency. +pub struct MetadataStore { + /// Broker manager + broker_manager: Arc, + + /// Topic manager + topic_manager: Arc, + + /// Config manager + config_manager: Arc, + + /// Replicas manager + replicas_manager: Arc, +} + +impl MetadataStore { + /// Create a new metadata store + pub async fn new(config: Arc) -> Result { + info!("Initializing metadata store"); + + let broker_manager = Arc::new(BrokerManager::new(config.clone())); + let topic_manager = Arc::new(TopicManager::new(config.clone())); + let config_manager = Arc::new(ConfigManager::new(config.clone())); + let replicas_manager = Arc::new(ReplicasManager::new(config)); + + Ok(Self { + broker_manager, + topic_manager, + config_manager, + replicas_manager, + }) + } + + /// Start the metadata store + pub async fn start(&self) -> Result<()> { + info!("Starting metadata store"); + self.broker_manager.start().await?; + self.topic_manager.start().await?; + self.config_manager.start().await?; + self.replicas_manager.start().await?; + Ok(()) + } + + /// Shutdown the metadata store + pub async fn shutdown(&self) -> Result<()> { + info!("Shutting down metadata store"); + 
self.broker_manager.shutdown().await?; + self.topic_manager.shutdown().await?; + self.config_manager.shutdown().await?; + self.replicas_manager.shutdown().await?; + Ok(()) + } + + /// Get the broker manager + pub fn broker_manager(&self) -> &Arc { + &self.broker_manager + } + + /// Get the topic manager + pub fn topic_manager(&self) -> &Arc { + &self.topic_manager + } + + /// Get the config manager + pub fn config_manager(&self) -> &Arc { + &self.config_manager + } + + /// Get the replicas manager + pub fn replicas_manager(&self) -> &Arc { + &self.replicas_manager + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_metadata_store() { + // Placeholder test + assert!(true); + } +} diff --git a/rocketmq-controller/src/metadata/replica.rs b/rocketmq-controller/src/metadata/replica.rs new file mode 100644 index 00000000..8e68797d --- /dev/null +++ b/rocketmq-controller/src/metadata/replica.rs @@ -0,0 +1,596 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use std::collections::HashMap; +use std::sync::Arc; +use std::time::SystemTime; + +use dashmap::DashMap; +use serde::Deserialize; +use serde::Serialize; +use tracing::debug; +use tracing::info; +use tracing::warn; + +use crate::config::ControllerConfig; +use crate::error::Result; + +/// Replica role +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub enum ReplicaRole { + /// Master replica + Master, + /// Slave replica + Slave, +} + +/// Broker replica information +/// +/// Tracks the state of a broker replica including: +/// - Role (master/slave) +/// - Epoch (version number for master election) +/// - Sync state (whether replica is in-sync) +/// - Offset tracking +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BrokerReplicaInfo { + /// Cluster name + pub cluster_name: String, + + /// Broker name (identifies the broker set) + pub broker_name: String, + + /// Broker ID (0 for master, >0 for slaves) + pub broker_id: u64, + + /// Broker address + pub broker_addr: String, + + /// Replica role + pub role: ReplicaRole, + + /// Epoch number (incremented on master election) + pub epoch: u64, + + /// Maximum offset + pub max_offset: i64, + + /// Last sync timestamp + pub last_sync_timestamp: u64, + + /// Whether this replica is in-sync + pub in_sync: bool, +} + +impl BrokerReplicaInfo { + /// Create a new master replica + pub fn new_master( + cluster_name: String, + broker_name: String, + broker_id: u64, + broker_addr: String, + epoch: u64, + ) -> Self { + Self { + cluster_name, + broker_name, + broker_id, + broker_addr, + role: ReplicaRole::Master, + epoch, + max_offset: 0, + last_sync_timestamp: SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs(), + in_sync: true, + } + } + + /// Create a new slave replica + pub fn new_slave( + cluster_name: String, + broker_name: String, + broker_id: u64, + broker_addr: String, + ) -> Self { + Self { + cluster_name, + broker_name, + broker_id, + broker_addr, + 
role: ReplicaRole::Slave, + epoch: 0, + max_offset: 0, + last_sync_timestamp: SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs(), + in_sync: false, + } + } + + /// Check if this is a master replica + pub fn is_master(&self) -> bool { + self.role == ReplicaRole::Master + } + + /// Check if this replica is in-sync + pub fn is_in_sync(&self) -> bool { + self.in_sync + } + + /// Get replica ID (broker_name:broker_id) + pub fn replica_id(&self) -> String { + format!("{}:{}", self.broker_name, self.broker_id) + } +} + +/// Sync state set (In-Sync Replicas) +/// +/// Tracks the set of replicas that are considered in-sync with the master. +/// Similar to Kafka's ISR (In-Sync Replica) concept. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SyncStateSet { + /// Broker name (identifies the broker set) + pub broker_name: String, + + /// Master broker ID + pub master_broker_id: u64, + + /// Master address + pub master_addr: String, + + /// Master epoch + pub master_epoch: u64, + + /// Sync state set (list of in-sync broker IDs) + pub sync_state_set: Vec, + + /// Last update timestamp + pub last_update_timestamp: u64, +} + +impl SyncStateSet { + /// Create a new sync state set + pub fn new(broker_name: String, master_broker_id: u64, master_addr: String) -> Self { + Self { + broker_name, + master_broker_id, + master_addr, + master_epoch: 0, + sync_state_set: vec![master_broker_id], + last_update_timestamp: SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs(), + } + } + + /// Check if a broker is in the sync state set + pub fn contains(&self, broker_id: u64) -> bool { + self.sync_state_set.contains(&broker_id) + } + + /// Add a broker to the sync state set + pub fn add_broker(&mut self, broker_id: u64) { + if !self.sync_state_set.contains(&broker_id) { + self.sync_state_set.push(broker_id); + self.last_update_timestamp = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + 
.as_secs(); + } + } + + /// Remove a broker from the sync state set + pub fn remove_broker(&mut self, broker_id: u64) { + self.sync_state_set.retain(|&id| id != broker_id); + self.last_update_timestamp = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs(); + } + + /// Get the size of the sync state set + pub fn size(&self) -> usize { + self.sync_state_set.len() + } + + /// Check if a broker is the master + pub fn is_master(&self, broker_id: u64) -> bool { + self.master_broker_id == broker_id + } +} + +/// Replicas manager +/// +/// Manages broker replicas and sync state sets across the cluster. +/// Provides functionality for: +/// - Replica registration and tracking +/// - ISR (In-Sync Replicas) management +/// - Master election and failover +pub struct ReplicasManager { + /// Configuration + config: Arc, + + /// Replicas: broker_name -> (broker_id -> BrokerReplicaInfo) + replicas: Arc>>, + + /// Sync state sets: broker_name -> SyncStateSet + sync_state_sets: Arc>, +} + +impl ReplicasManager { + /// Create a new replicas manager + pub fn new(config: Arc) -> Self { + Self { + config, + replicas: Arc::new(DashMap::new()), + sync_state_sets: Arc::new(DashMap::new()), + } + } + + /// Start the replicas manager + pub async fn start(&self) -> Result<()> { + info!("Starting replicas manager"); + Ok(()) + } + + /// Shutdown the replicas manager + pub async fn shutdown(&self) -> Result<()> { + info!("Shutting down replicas manager"); + self.replicas.clear(); + self.sync_state_sets.clear(); + Ok(()) + } + + /// Register a replica + pub async fn register_replica(&self, replica: BrokerReplicaInfo) -> Result<()> { + let broker_name = replica.broker_name.clone(); + let broker_id = replica.broker_id; + let is_master = replica.is_master(); + + debug!( + "Registering replica: {}:{} (role={:?})", + broker_name, broker_id, replica.role + ); + + // Update replicas map + self.replicas + .entry(broker_name.clone()) + .or_default() + .insert(broker_id, 
replica.clone()); + + // If this is a master, initialize or update sync state set + if is_master { + let mut sync_state_set = + SyncStateSet::new(broker_name.clone(), broker_id, replica.broker_addr.clone()); + sync_state_set.master_epoch = replica.epoch; + self.sync_state_sets.insert(broker_name, sync_state_set); + } + + Ok(()) + } + + /// Unregister a replica + pub async fn unregister_replica(&self, broker_name: &str, broker_id: u64) -> Result<()> { + debug!("Unregistering replica: {}:{}", broker_name, broker_id); + + // Remove from replicas map + if let Some(mut replicas) = self.replicas.get_mut(broker_name) { + replicas.remove(&broker_id); + if replicas.is_empty() { + drop(replicas); + self.replicas.remove(broker_name); + } + } + + // Remove from sync state set + if let Some(mut sync_state_set) = self.sync_state_sets.get_mut(broker_name) { + sync_state_set.remove_broker(broker_id); + } + + Ok(()) + } + + /// Get the master replica for a broker set + pub async fn get_master(&self, broker_name: &str) -> Option { + self.replicas.get(broker_name).and_then(|replicas| { + replicas + .values() + .find(|replica| replica.is_master()) + .cloned() + }) + } + + /// Get all replicas for a broker set + pub async fn get_replicas(&self, broker_name: &str) -> Vec { + self.replicas + .get(broker_name) + .map(|replicas| replicas.values().cloned().collect()) + .unwrap_or_default() + } + + /// Get a specific replica + pub async fn get_replica( + &self, + broker_name: &str, + broker_id: u64, + ) -> Option { + self.replicas + .get(broker_name) + .and_then(|replicas| replicas.get(&broker_id).cloned()) + } + + /// Get sync state set for a broker set + pub async fn get_sync_state_set(&self, broker_name: &str) -> Option { + self.sync_state_sets.get(broker_name).map(|s| s.clone()) + } + + /// Add a broker to the sync state set + pub async fn add_to_sync_state_set(&self, broker_name: &str, broker_id: u64) -> Result<()> { + if let Some(mut sync_state_set) = 
self.sync_state_sets.get_mut(broker_name) { + sync_state_set.add_broker(broker_id); + debug!( + "Added broker {} to sync state set for {}", + broker_id, broker_name + ); + } + + // Update replica in-sync status + if let Some(mut replicas) = self.replicas.get_mut(broker_name) { + if let Some(replica) = replicas.get_mut(&broker_id) { + replica.in_sync = true; + } + } + + Ok(()) + } + + /// Remove a broker from the sync state set + pub async fn remove_from_sync_state_set( + &self, + broker_name: &str, + broker_id: u64, + ) -> Result<()> { + if let Some(mut sync_state_set) = self.sync_state_sets.get_mut(broker_name) { + sync_state_set.remove_broker(broker_id); + warn!( + "Removed broker {} from sync state set for {}", + broker_id, broker_name + ); + } + + // Update replica in-sync status + if let Some(mut replicas) = self.replicas.get_mut(broker_name) { + if let Some(replica) = replicas.get_mut(&broker_id) { + replica.in_sync = false; + } + } + + Ok(()) + } + + /// Update sync state set + pub async fn update_sync_state_set( + &self, + broker_name: &str, + new_sync_state_set: Vec, + ) -> Result<()> { + if let Some(mut sync_state_set) = self.sync_state_sets.get_mut(broker_name) { + sync_state_set.sync_state_set = new_sync_state_set.clone(); + sync_state_set.last_update_timestamp = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs(); + + debug!( + "Updated sync state set for {}: {:?}", + broker_name, new_sync_state_set + ); + } + + // Update replica in-sync status + if let Some(mut replicas) = self.replicas.get_mut(broker_name) { + for (broker_id, replica) in replicas.iter_mut() { + replica.in_sync = new_sync_state_set.contains(broker_id); + } + } + + Ok(()) + } + + /// Elect a new master for a broker set + /// + /// This is typically called when the current master fails. + /// A new master is elected from the in-sync replicas. 
+ pub async fn elect_master(&self, broker_name: &str) -> Result> { + info!("Electing new master for broker set: {}", broker_name); + + // Get current sync state set + let sync_state_set = match self.sync_state_sets.get(broker_name) { + Some(s) => s.clone(), + None => { + warn!("No sync state set found for {}", broker_name); + return Ok(None); + } + }; + + // Find the first in-sync slave to promote + let new_master_id = sync_state_set + .sync_state_set + .iter() + .find(|&&id| id != sync_state_set.master_broker_id) + .copied(); + + let new_master_id = match new_master_id { + Some(id) => id, + None => { + warn!("No in-sync slaves available for {}", broker_name); + return Ok(None); + } + }; + + // Promote the slave to master + let mut new_master = None; + if let Some(mut replicas) = self.replicas.get_mut(broker_name) { + // Demote old master if it still exists + if let Some(old_master) = replicas.get_mut(&sync_state_set.master_broker_id) { + old_master.role = ReplicaRole::Slave; + } + + // Promote new master + if let Some(replica) = replicas.get_mut(&new_master_id) { + replica.role = ReplicaRole::Master; + replica.epoch += 1; + new_master = Some(replica.clone()); + + info!( + "Elected new master for {}: {} (epoch={})", + broker_name, new_master_id, replica.epoch + ); + } + } + + // Update sync state set + if let (Some(master), Some(mut sync_state_set)) = ( + new_master.as_ref(), + self.sync_state_sets.get_mut(broker_name), + ) { + sync_state_set.master_broker_id = new_master_id; + sync_state_set.master_addr = master.broker_addr.clone(); + sync_state_set.master_epoch = master.epoch; + sync_state_set.last_update_timestamp = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs(); + } + + Ok(new_master) + } + + /// List all broker sets + pub async fn list_broker_sets(&self) -> Vec { + self.replicas + .iter() + .map(|entry| entry.key().clone()) + .collect() + } + + /// Get statistics + pub async fn get_stats(&self) -> HashMap { + let mut stats 
= HashMap::new(); + stats.insert("broker_sets".to_string(), self.replicas.len()); + stats.insert("sync_state_sets".to_string(), self.sync_state_sets.len()); + + let total_replicas: usize = self.replicas.iter().map(|entry| entry.value().len()).sum(); + stats.insert("total_replicas".to_string(), total_replicas); + + stats + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_broker_replica_info() { + let master = BrokerReplicaInfo::new_master( + "test-cluster".to_string(), + "broker-a".to_string(), + 0, + "127.0.0.1:10911".to_string(), + 1, + ); + + assert!(master.is_master()); + assert!(master.is_in_sync()); + assert_eq!(master.replica_id(), "broker-a:0"); + assert_eq!(master.epoch, 1); + + let slave = BrokerReplicaInfo::new_slave( + "test-cluster".to_string(), + "broker-a".to_string(), + 1, + "127.0.0.1:10912".to_string(), + ); + + assert!(!slave.is_master()); + assert!(!slave.is_in_sync()); + assert_eq!(slave.replica_id(), "broker-a:1"); + assert_eq!(slave.epoch, 0); + } + + #[test] + fn test_sync_state_set() { + let mut sync_state = + SyncStateSet::new("broker-a".to_string(), 0, "127.0.0.1:10911".to_string()); + + assert_eq!(sync_state.size(), 1); + assert!(sync_state.contains(0)); + assert!(sync_state.is_master(0)); + + sync_state.add_broker(1); + assert_eq!(sync_state.size(), 2); + assert!(sync_state.contains(1)); + assert!(!sync_state.is_master(1)); + + sync_state.remove_broker(1); + assert_eq!(sync_state.size(), 1); + assert!(!sync_state.contains(1)); + } + + #[tokio::test] + async fn test_replicas_manager() { + let config = Arc::new(ControllerConfig::new(1, "127.0.0.1:9876".parse().unwrap())); + let manager = ReplicasManager::new(config); + + // Register master + let master = BrokerReplicaInfo::new_master( + "test-cluster".to_string(), + "broker-a".to_string(), + 0, + "127.0.0.1:10911".to_string(), + 1, + ); + manager.register_replica(master.clone()).await.unwrap(); + + // Register slave + let slave = BrokerReplicaInfo::new_slave( + 
"test-cluster".to_string(), + "broker-a".to_string(), + 1, + "127.0.0.1:10912".to_string(), + ); + manager.register_replica(slave).await.unwrap(); + + // Verify + let replicas = manager.get_replicas("broker-a").await; + assert_eq!(replicas.len(), 2); + + let master_replica = manager.get_master("broker-a").await; + assert!(master_replica.is_some()); + assert_eq!(master_replica.unwrap().broker_id, 0); + + // Elect new master + manager.add_to_sync_state_set("broker-a", 1).await.unwrap(); + let new_master = manager.elect_master("broker-a").await.unwrap(); + assert!(new_master.is_some()); + assert_eq!(new_master.unwrap().broker_id, 1); + } +} diff --git a/rocketmq-controller/src/metadata/topic.rs b/rocketmq-controller/src/metadata/topic.rs new file mode 100644 index 00000000..a7eb0836 --- /dev/null +++ b/rocketmq-controller/src/metadata/topic.rs @@ -0,0 +1,255 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use std::sync::Arc; + +use dashmap::DashMap; +use serde::Deserialize; +use serde::Serialize; +use tracing::info; + +use crate::config::ControllerConfig; +use crate::error::ControllerError; +use crate::error::Result; + +/// Topic configuration +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TopicConfig { + /// Topic name + pub topic_name: String, + + /// Number of read queues + pub read_queue_nums: u32, + + /// Number of write queues + pub write_queue_nums: u32, + + /// Permission + pub perm: u32, + + /// Topic filter type + pub topic_filter_type: u32, + + /// Topic system flag + pub topic_sys_flag: u32, + + /// Order + pub order: bool, + + /// Attributes + pub attributes: serde_json::Value, +} + +/// Topic information +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TopicInfo { + /// Topic name + pub name: String, + + /// Number of read queues + pub read_queue_nums: u32, + + /// Number of write queues + pub write_queue_nums: u32, + + /// Permission + pub perm: u32, + + /// Topic system flag + pub topic_sys_flag: u32, + + /// Broker addresses that have this topic + pub brokers: Vec, + + /// Additional metadata + pub metadata: serde_json::Value, +} + +/// Topic manager +pub struct TopicManager { + /// Topics: topic_name -> TopicInfo + topics: Arc>, + + /// Configuration + #[allow(dead_code)] + config: Arc, +} + +impl TopicManager { + /// Create a new topic manager + pub fn new(config: Arc) -> Self { + Self { + topics: Arc::new(DashMap::new()), + config, + } + } + + /// Start the topic manager + pub async fn start(&self) -> Result<()> { + info!("Starting topic manager"); + Ok(()) + } + + /// Shutdown the topic manager + pub async fn shutdown(&self) -> Result<()> { + info!("Shutting down topic manager"); + self.topics.clear(); + Ok(()) + } + + /// Create a topic from config + pub async fn create_topic(&self, config: TopicConfig) -> Result<()> { + info!("Creating topic: {}", config.topic_name); + + if config.topic_name.is_empty() { + 
return Err(ControllerError::InvalidRequest( + "Topic name cannot be empty".to_string(), + )); + } + + // Convert config to info + let info = TopicInfo { + name: config.topic_name.clone(), + read_queue_nums: config.read_queue_nums, + write_queue_nums: config.write_queue_nums, + perm: config.perm, + topic_sys_flag: config.topic_sys_flag, + brokers: Vec::new(), + metadata: config.attributes, + }; + + self.topics.insert(info.name.clone(), info); + Ok(()) + } + + /// Update a topic from config + pub async fn update_topic(&self, config: TopicConfig) -> Result<()> { + info!("Updating topic: {}", config.topic_name); + + if config.topic_name.is_empty() { + return Err(ControllerError::InvalidRequest( + "Topic name cannot be empty".to_string(), + )); + } + + // Check if topic exists + if !self.topics.contains_key(&config.topic_name) { + return Err(ControllerError::MetadataNotFound { + key: config.topic_name.clone(), + }); + } + + // Convert config to info (preserving brokers list) + let old_brokers = self + .topics + .get(&config.topic_name) + .map(|v| v.brokers.clone()) + .unwrap_or_default(); + + let info = TopicInfo { + name: config.topic_name.clone(), + read_queue_nums: config.read_queue_nums, + write_queue_nums: config.write_queue_nums, + perm: config.perm, + topic_sys_flag: config.topic_sys_flag, + brokers: old_brokers, + metadata: config.attributes, + }; + + self.topics.insert(info.name.clone(), info); + Ok(()) + } + + /// Create or update a topic + pub async fn create_or_update_topic(&self, info: TopicInfo) -> Result<()> { + info!("Creating/updating topic: {}", info.name); + + if info.name.is_empty() { + return Err(ControllerError::InvalidRequest( + "Topic name cannot be empty".to_string(), + )); + } + + self.topics.insert(info.name.clone(), info); + Ok(()) + } + + /// Delete a topic + pub async fn delete_topic(&self, topic_name: &str) -> Result<()> { + info!("Deleting topic: {}", topic_name); + + self.topics + .remove(topic_name) + .ok_or_else(|| 
ControllerError::MetadataNotFound { + key: topic_name.to_string(), + })?; + + Ok(()) + } + + /// Get topic information + pub async fn get_topic(&self, topic_name: &str) -> Result { + self.topics + .get(topic_name) + .map(|entry| entry.value().clone()) + .ok_or_else(|| ControllerError::MetadataNotFound { + key: topic_name.to_string(), + }) + } + + /// List all topics + pub async fn list_topics(&self) -> Vec { + self.topics + .iter() + .map(|entry| entry.value().clone()) + .collect() + } + + /// List topics by broker + pub async fn list_topics_by_broker(&self, broker_name: &str) -> Vec { + self.topics + .iter() + .filter(|entry| entry.value().brokers.contains(&broker_name.to_string())) + .map(|entry| entry.value().clone()) + .collect() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_topic_creation() { + let config = Arc::new(ControllerConfig::test_config()); + + let manager = TopicManager::new(config); + + let info = TopicInfo { + name: "TestTopic".to_string(), + read_queue_nums: 4, + write_queue_nums: 4, + perm: 6, + topic_sys_flag: 0, + brokers: vec!["broker-a".to_string()], + metadata: serde_json::json!({}), + }; + + assert!(manager.create_or_update_topic(info.clone()).await.is_ok()); + assert!(manager.get_topic("TestTopic").await.is_ok()); + } +} diff --git a/rocketmq-controller/src/processor/broker_processor.rs b/rocketmq-controller/src/processor/broker_processor.rs new file mode 100644 index 00000000..6ae79738 --- /dev/null +++ b/rocketmq-controller/src/processor/broker_processor.rs @@ -0,0 +1,349 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::sync::Arc; +use std::time::SystemTime; + +use tracing::debug; +use tracing::error; +use tracing::info; + +use crate::error::ControllerError; +use crate::error::Result; +use crate::metadata::BrokerInfo; +use crate::metadata::MetadataStore; +use crate::processor::request::BrokerHeartbeatRequest; +use crate::processor::request::BrokerHeartbeatResponse; +use crate::processor::request::ElectMasterRequest; +use crate::processor::request::ElectMasterResponse; +use crate::processor::request::RegisterBrokerRequest; +use crate::processor::request::RegisterBrokerResponse; +use crate::processor::request::UnregisterBrokerRequest; +use crate::processor::request::UnregisterBrokerResponse; +use crate::processor::RequestProcessor; +use crate::raft::RaftController; + +/// Register broker processor +pub struct RegisterBrokerProcessor { + /// Metadata store + metadata: Arc, + + /// Raft controller + raft: Arc, +} + +impl RegisterBrokerProcessor { + /// Create a new register broker processor + pub fn new(metadata: Arc, raft: Arc) -> Self { + Self { metadata, raft } + } + + /// Process register broker request + pub async fn process_request( + &self, + request: RegisterBrokerRequest, + ) -> Result { + info!( + "Processing register broker request: {}", + request.broker_name + ); + + // Check if we are the leader + if !self.raft.is_leader().await { + let leader = self.raft.get_leader().await; + error!("Not leader, current leader: {:?}", leader); + return Ok(RegisterBrokerResponse { + success: false, + error: Some(format!("Not leader, current leader: {:?}", 
leader)), + broker_id: None, + }); + } + + // Create broker info + let broker_info = BrokerInfo { + name: request.broker_name.clone(), + broker_id: request.broker_id, + cluster_name: request.cluster_name.clone(), + addr: request.broker_addr, + last_heartbeat: SystemTime::now(), + version: request.version.clone(), + role: request.role, + metadata: request.metadata.clone(), + }; + + // Register broker + match self.metadata.broker_manager().register(broker_info).await { + Ok(()) => { + info!("Successfully registered broker: {}", request.broker_name); + Ok(RegisterBrokerResponse { + success: true, + error: None, + broker_id: Some(request.broker_id), + }) + } + Err(e) => { + error!("Failed to register broker {}: {}", request.broker_name, e); + Ok(RegisterBrokerResponse { + success: false, + error: Some(e.to_string()), + broker_id: None, + }) + } + } + } +} + +#[async_trait::async_trait] +impl RequestProcessor for RegisterBrokerProcessor { + async fn process(&self, request: &[u8]) -> Result> { + // Deserialize request + let req: RegisterBrokerRequest = serde_json::from_slice(request) + .map_err(|e| ControllerError::InvalidRequest(e.to_string()))?; + + // Process request + let response = self.process_request(req).await?; + + // Serialize response + serde_json::to_vec(&response) + .map_err(|e| ControllerError::SerializationError(e.to_string())) + } +} + +/// Unregister broker processor +pub struct UnregisterBrokerProcessor { + /// Metadata store + metadata: Arc, + + /// Raft controller + raft: Arc, +} + +impl UnregisterBrokerProcessor { + /// Create a new unregister broker processor + pub fn new(metadata: Arc, raft: Arc) -> Self { + Self { metadata, raft } + } + + /// Process unregister broker request + pub async fn process_request( + &self, + request: UnregisterBrokerRequest, + ) -> Result { + info!( + "Processing unregister broker request: {}", + request.broker_name + ); + + // Check if we are the leader + if !self.raft.is_leader().await { + let leader = 
self.raft.get_leader().await; + return Ok(UnregisterBrokerResponse { + success: false, + error: Some(format!("Not leader, current leader: {:?}", leader)), + }); + } + + // Unregister broker + match self + .metadata + .broker_manager() + .unregister(&request.broker_name) + .await + { + Ok(()) => { + info!("Successfully unregistered broker: {}", request.broker_name); + Ok(UnregisterBrokerResponse { + success: true, + error: None, + }) + } + Err(e) => { + error!("Failed to unregister broker {}: {}", request.broker_name, e); + Ok(UnregisterBrokerResponse { + success: false, + error: Some(e.to_string()), + }) + } + } + } +} + +#[async_trait::async_trait] +impl RequestProcessor for UnregisterBrokerProcessor { + async fn process(&self, request: &[u8]) -> Result> { + let req: UnregisterBrokerRequest = serde_json::from_slice(request) + .map_err(|e| ControllerError::InvalidRequest(e.to_string()))?; + + let response = self.process_request(req).await?; + + serde_json::to_vec(&response) + .map_err(|e| ControllerError::SerializationError(e.to_string())) + } +} + +/// Broker heartbeat processor +pub struct BrokerHeartbeatProcessor { + /// Metadata store + metadata: Arc, +} + +impl BrokerHeartbeatProcessor { + /// Create a new broker heartbeat processor + pub fn new(metadata: Arc) -> Self { + Self { metadata } + } + + /// Process broker heartbeat request + pub async fn process_request( + &self, + request: BrokerHeartbeatRequest, + ) -> Result { + debug!("Processing broker heartbeat: {}", request.broker_name); + + // Update heartbeat + match self + .metadata + .broker_manager() + .heartbeat(&request.broker_name) + .await + { + Ok(()) => { + debug!( + "Successfully updated heartbeat for broker: {}", + request.broker_name + ); + Ok(BrokerHeartbeatResponse { + success: true, + error: None, + }) + } + Err(e) => { + error!( + "Failed to update heartbeat for broker {}: {}", + request.broker_name, e + ); + Ok(BrokerHeartbeatResponse { + success: false, + error: Some(e.to_string()), + }) + 
} + } + } +} + +#[async_trait::async_trait] +impl RequestProcessor for BrokerHeartbeatProcessor { + async fn process(&self, request: &[u8]) -> Result> { + let req: BrokerHeartbeatRequest = serde_json::from_slice(request) + .map_err(|e| ControllerError::InvalidRequest(e.to_string()))?; + + let response = self.process_request(req).await?; + + serde_json::to_vec(&response) + .map_err(|e| ControllerError::SerializationError(e.to_string())) + } +} + +/// Elect master processor +pub struct ElectMasterProcessor { + /// Metadata store + metadata: Arc, + + /// Raft controller + raft: Arc, +} + +impl ElectMasterProcessor { + /// Create a new elect master processor + pub fn new(metadata: Arc, raft: Arc) -> Self { + Self { metadata, raft } + } + + /// Process elect master request + pub async fn process_request( + &self, + request: ElectMasterRequest, + ) -> Result { + info!( + "Processing elect master request for cluster: {}, broker: {}", + request.cluster_name, request.broker_name + ); + + // Check if we are the leader + if !self.raft.is_leader().await { + let leader = self.raft.get_leader().await; + return Ok(ElectMasterResponse { + success: false, + error: Some(format!("Not leader, current leader: {:?}", leader)), + master_broker: None, + master_addr: None, + }); + } + + // Get brokers in the cluster + let brokers = self + .metadata + .broker_manager() + .list_brokers_by_cluster(&request.cluster_name) + .await; + + if brokers.is_empty() { + return Ok(ElectMasterResponse { + success: false, + error: Some("No brokers found in cluster".to_string()), + master_broker: None, + master_addr: None, + }); + } + + // Find the master broker (simple logic: first broker with Master role) + let master = brokers + .iter() + .find(|b| b.role == crate::metadata::BrokerRole::Master); + + match master { + Some(broker) => { + info!("Elected master broker: {}", broker.name); + Ok(ElectMasterResponse { + success: true, + error: None, + master_broker: Some(broker.name.clone()), + master_addr: 
Some(broker.addr), + }) + } + None => Ok(ElectMasterResponse { + success: false, + error: Some("No master broker found in cluster".to_string()), + master_broker: None, + master_addr: None, + }), + } + } +} + +#[async_trait::async_trait] +impl RequestProcessor for ElectMasterProcessor { + async fn process(&self, request: &[u8]) -> Result> { + let req: ElectMasterRequest = serde_json::from_slice(request) + .map_err(|e| ControllerError::InvalidRequest(e.to_string()))?; + + let response = self.process_request(req).await?; + + serde_json::to_vec(&response) + .map_err(|e| ControllerError::SerializationError(e.to_string())) + } +} diff --git a/rocketmq-controller/src/processor/metadata_processor.rs b/rocketmq-controller/src/processor/metadata_processor.rs new file mode 100644 index 00000000..409424ec --- /dev/null +++ b/rocketmq-controller/src/processor/metadata_processor.rs @@ -0,0 +1,121 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use std::sync::Arc; + +use serde_json::json; +use tracing::info; + +use crate::error::ControllerError; +use crate::error::Result; +use crate::metadata::MetadataStore; +use crate::processor::request::GetMetadataRequest; +use crate::processor::request::GetMetadataResponse; +use crate::processor::request::MetadataType; +use crate::processor::RequestProcessor; + +/// Get metadata processor +pub struct GetMetadataProcessor { + /// Metadata store + metadata: Arc, +} + +impl GetMetadataProcessor { + /// Create a new get metadata processor + pub fn new(metadata: Arc) -> Self { + Self { metadata } + } + + /// Process get metadata request + pub async fn process_request( + &self, + request: GetMetadataRequest, + ) -> Result { + info!( + "Processing get metadata request, type: {:?}", + request.metadata_type + ); + + let (brokers, topics, configs) = match request.metadata_type { + MetadataType::Broker => { + let brokers = self.metadata.broker_manager().list_brokers().await; + (brokers, Vec::new(), json!({})) + } + MetadataType::Topic => { + let topics = self.metadata.topic_manager().list_topics().await; + (Vec::new(), topics, json!({})) + } + MetadataType::Config => { + let configs = self.metadata.config_manager().list_configs().await; + (Vec::new(), Vec::new(), json!(configs)) + } + MetadataType::All => { + // Get all metadata + let brokers = self.metadata.broker_manager().list_brokers().await; + let topics = self.metadata.topic_manager().list_topics().await; + let configs = self.metadata.config_manager().list_configs().await; + (brokers, topics, json!(configs)) + } + }; + + Ok(GetMetadataResponse { + success: true, + error: None, + brokers, + topics, + configs, + }) + } +} + +#[async_trait::async_trait] +impl RequestProcessor for GetMetadataProcessor { + async fn process(&self, request: &[u8]) -> Result> { + let req: GetMetadataRequest = serde_json::from_slice(request) + .map_err(|e| ControllerError::InvalidRequest(e.to_string()))?; + + let response = 
self.process_request(req).await?; + + serde_json::to_vec(&response) + .map_err(|e| ControllerError::SerializationError(e.to_string())) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::ControllerConfig; + use crate::config::RaftPeer; + + #[tokio::test] + async fn test_get_metadata_processor() { + let config = Arc::new(ControllerConfig::test_config()); + + let metadata = Arc::new(MetadataStore::new(config.clone()).await.unwrap()); + let processor = GetMetadataProcessor::new(metadata); + + let request = GetMetadataRequest { + metadata_type: MetadataType::All, + key: None, + }; + + let response = processor.process_request(request).await.unwrap(); + assert!(response.success); + assert!(!response.brokers.is_empty() || response.brokers.is_empty()); + assert!(!response.topics.is_empty() || response.topics.is_empty()); + } +} diff --git a/rocketmq-controller/src/processor/mod.rs b/rocketmq-controller/src/processor/mod.rs new file mode 100644 index 00000000..5881a5a8 --- /dev/null +++ b/rocketmq-controller/src/processor/mod.rs @@ -0,0 +1,165 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +pub mod broker_processor; +pub mod metadata_processor; +pub mod request; +pub mod topic_processor; + +use std::collections::HashMap; +use std::sync::Arc; + +// Re-export processors +pub use broker_processor::{ + BrokerHeartbeatProcessor, ElectMasterProcessor, RegisterBrokerProcessor, + UnregisterBrokerProcessor, +}; +pub use metadata_processor::GetMetadataProcessor; +pub use request::RequestType; +pub use request::*; +pub use topic_processor::CreateTopicProcessor; +pub use topic_processor::DeleteTopicProcessor; +pub use topic_processor::UpdateTopicProcessor; +use tracing::info; + +use crate::config::ControllerConfig; +use crate::error::ControllerError; +use crate::error::Result; +use crate::metadata::MetadataStore; +use crate::raft::RaftController; + +/// Request processor trait +#[async_trait::async_trait] +pub trait RequestProcessor: Send + Sync { + /// Process a request + async fn process(&self, request: &[u8]) -> Result>; +} + +/// Processor manager +/// +/// This component manages all request processors for handling +/// RPC requests from brokers and clients. 
+pub struct ProcessorManager { + /// Configuration + config: Arc, + + /// Raft controller + raft: Arc, + + /// Metadata store + metadata: Arc, + + /// Processor registry + processors: HashMap>, +} + +impl ProcessorManager { + /// Create a new processor manager + pub fn new( + config: Arc, + raft: Arc, + metadata: Arc, + ) -> Self { + // Initialize processors + let mut processors: HashMap> = HashMap::new(); + + // Register broker processors + processors.insert( + RequestType::RegisterBroker, + Arc::new(RegisterBrokerProcessor::new(metadata.clone(), raft.clone())), + ); + processors.insert( + RequestType::UnregisterBroker, + Arc::new(UnregisterBrokerProcessor::new( + metadata.clone(), + raft.clone(), + )), + ); + processors.insert( + RequestType::BrokerHeartbeat, + Arc::new(BrokerHeartbeatProcessor::new(metadata.clone())), + ); + processors.insert( + RequestType::ElectMaster, + Arc::new(ElectMasterProcessor::new(metadata.clone(), raft.clone())), + ); + + // Register metadata processor + processors.insert( + RequestType::GetMetadata, + Arc::new(GetMetadataProcessor::new(metadata.clone())), + ); + + // Register topic processors + processors.insert( + RequestType::CreateTopic, + Arc::new(CreateTopicProcessor::new(metadata.clone(), raft.clone())), + ); + processors.insert( + RequestType::UpdateTopic, + Arc::new(UpdateTopicProcessor::new(metadata.clone(), raft.clone())), + ); + processors.insert( + RequestType::DeleteTopic, + Arc::new(DeleteTopicProcessor::new(metadata.clone(), raft.clone())), + ); + + Self { + config, + raft, + metadata, + processors, + } + } + + /// Process a request + pub async fn process_request(&self, request_type: RequestType, data: &[u8]) -> Result> { + // Find the processor + let processor = self.processors.get(&request_type).ok_or_else(|| { + ControllerError::InvalidRequest(format!("Unknown request type: {:?}", request_type)) + })?; + + // Process the request + processor.process(data).await + } + + /// Start the processor manager + pub async fn 
start(&self) -> Result<()> { + info!( + "Starting processor manager with {} processors", + self.processors.len() + ); + // TODO: Start network server to handle incoming requests + Ok(()) + } + + /// Shutdown the processor manager + pub async fn shutdown(&self) -> Result<()> { + info!("Shutting down processor manager"); + // TODO: Stop network server and cleanup + Ok(()) + } +} + +#[cfg(test)] +mod tests { + #[tokio::test] + async fn test_processor_manager() { + // Placeholder test + assert!(true); + } +} diff --git a/rocketmq-controller/src/processor/request.rs b/rocketmq-controller/src/processor/request.rs new file mode 100644 index 00000000..61fc6b06 --- /dev/null +++ b/rocketmq-controller/src/processor/request.rs @@ -0,0 +1,267 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use std::net::SocketAddr; + +use serde::Deserialize; +use serde::Serialize; + +use crate::metadata::BrokerInfo; +use crate::metadata::BrokerRole; +use crate::metadata::TopicInfo; + +/// Request type enumeration +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub enum RequestType { + RegisterBroker, + UnregisterBroker, + BrokerHeartbeat, + ElectMaster, + GetMetadata, + CreateTopic, + UpdateTopic, + DeleteTopic, + UpdateConfig, + GetConfig, +} + +/// Register broker request +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RegisterBrokerRequest { + /// Broker name + pub broker_name: String, + + /// Broker ID + pub broker_id: u64, + + /// Cluster name + pub cluster_name: String, + + /// Broker address + pub broker_addr: SocketAddr, + + /// Broker version + pub version: String, + + /// Broker role + pub role: BrokerRole, + + /// Additional metadata + pub metadata: serde_json::Value, +} + +/// Register broker response +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RegisterBrokerResponse { + /// Success flag + pub success: bool, + + /// Error message if failed + pub error: Option, + + /// Assigned broker ID + pub broker_id: Option, +} + +/// Unregister broker request +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct UnregisterBrokerRequest { + /// Broker name + pub broker_name: String, +} + +/// Unregister broker response +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct UnregisterBrokerResponse { + /// Success flag + pub success: bool, + + /// Error message if failed + pub error: Option, +} + +/// Broker heartbeat request +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BrokerHeartbeatRequest { + /// Broker name + pub broker_name: String, + + /// Timestamp + pub timestamp: u64, +} + +/// Broker heartbeat response +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BrokerHeartbeatResponse { + /// Success flag + pub success: bool, + + /// Error message if failed + pub error: 
Option, +} + +/// Elect master request +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ElectMasterRequest { + /// Cluster name + pub cluster_name: String, + + /// Broker name + pub broker_name: String, +} + +/// Elect master response +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ElectMasterResponse { + /// Success flag + pub success: bool, + + /// Error message if failed + pub error: Option, + + /// Master broker name + pub master_broker: Option, + + /// Master broker address + pub master_addr: Option, +} + +/// Get metadata request +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct GetMetadataRequest { + /// Metadata type + pub metadata_type: MetadataType, + + /// Filter key (optional) + pub key: Option, +} + +/// Metadata type +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum MetadataType { + Broker, + Topic, + Config, + All, +} + +/// Get metadata response +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct GetMetadataResponse { + /// Success flag + pub success: bool, + + /// Error message if failed + pub error: Option, + + /// Brokers + pub brokers: Vec, + + /// Topics + pub topics: Vec, + + /// Configs + pub configs: serde_json::Value, +} + +/// Create topic request +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CreateTopicRequest { + /// Topic name + pub topic_name: String, + + /// Read queue nums + pub read_queue_nums: u32, + + /// Write queue nums + pub write_queue_nums: u32, + + /// Permission + pub perm: u32, + + /// Topic system flag + pub topic_sys_flag: u32, +} + +/// Create topic response +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CreateTopicResponse { + /// Success flag + pub success: bool, + + /// Error message if failed + pub error: Option, +} + +/// Update topic request +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct UpdateTopicRequest { + /// Topic name + pub topic_name: String, + + /// Topic info + pub topic_info: TopicInfo, +} + +/// Update 
topic response +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct UpdateTopicResponse { + /// Success flag + pub success: bool, + + /// Error message if failed + pub error: Option, +} + +/// Delete topic request +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DeleteTopicRequest { + /// Topic name + pub topic_name: String, +} + +/// Delete topic response +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DeleteTopicResponse { + /// Success flag + pub success: bool, + + /// Error message if failed + pub error: Option, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_request_serialization() { + let req = RegisterBrokerRequest { + broker_name: "broker-a".to_string(), + broker_id: 0, + cluster_name: "DefaultCluster".to_string(), + broker_addr: "127.0.0.1:10911".parse().unwrap(), + version: "5.0.0".to_string(), + role: BrokerRole::Master, + metadata: serde_json::json!({}), + }; + + let json = serde_json::to_string(&req).unwrap(); + let deserialized: RegisterBrokerRequest = serde_json::from_str(&json).unwrap(); + assert_eq!(req.broker_name, deserialized.broker_name); + } +} diff --git a/rocketmq-controller/src/processor/topic_processor.rs b/rocketmq-controller/src/processor/topic_processor.rs new file mode 100644 index 00000000..b91b824b --- /dev/null +++ b/rocketmq-controller/src/processor/topic_processor.rs @@ -0,0 +1,264 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::sync::Arc; + +use tracing::error; +use tracing::info; + +use crate::error::ControllerError; +use crate::error::Result; +use crate::metadata::MetadataStore; +use crate::metadata::TopicConfig; +use crate::processor::request::CreateTopicRequest; +use crate::processor::request::CreateTopicResponse; +use crate::processor::request::DeleteTopicRequest; +use crate::processor::request::DeleteTopicResponse; +use crate::processor::request::UpdateTopicRequest; +use crate::processor::request::UpdateTopicResponse; +use crate::processor::RequestProcessor; +use crate::raft::RaftController; + +/// Create topic processor +pub struct CreateTopicProcessor { + /// Metadata store + metadata: Arc, + + /// Raft controller + raft: Arc, +} + +impl CreateTopicProcessor { + /// Create a new create topic processor + pub fn new(metadata: Arc, raft: Arc) -> Self { + Self { metadata, raft } + } + + /// Process create topic request + pub async fn process_request( + &self, + request: CreateTopicRequest, + ) -> Result { + info!("Processing create topic request: {}", request.topic_name); + + // Check if we are the leader + if !self.raft.is_leader().await { + let leader = self.raft.get_leader().await; + error!("Not leader, current leader: {:?}", leader); + return Ok(CreateTopicResponse { + success: false, + error: Some(format!("Not leader, current leader: {:?}", leader)), + }); + } + + // Validate request + if request.read_queue_nums == 0 || request.write_queue_nums == 0 { + return Ok(CreateTopicResponse { + success: false, + error: Some("Queue nums must be greater than 
0".to_string()), + }); + } + + // Create topic config + let config = TopicConfig { + topic_name: request.topic_name.clone(), + read_queue_nums: request.read_queue_nums, + write_queue_nums: request.write_queue_nums, + perm: request.perm, + topic_filter_type: 0, + topic_sys_flag: request.topic_sys_flag, + order: false, + attributes: serde_json::Value::Null, + }; + + // Create topic + match self.metadata.topic_manager().create_topic(config).await { + Ok(()) => { + info!("Successfully created topic: {}", request.topic_name); + Ok(CreateTopicResponse { + success: true, + error: None, + }) + } + Err(e) => { + error!("Failed to create topic {}: {}", request.topic_name, e); + Ok(CreateTopicResponse { + success: false, + error: Some(e.to_string()), + }) + } + } + } +} + +#[async_trait::async_trait] +impl RequestProcessor for CreateTopicProcessor { + async fn process(&self, request: &[u8]) -> Result> { + let req: CreateTopicRequest = serde_json::from_slice(request) + .map_err(|e| ControllerError::InvalidRequest(e.to_string()))?; + + let response = self.process_request(req).await?; + + serde_json::to_vec(&response) + .map_err(|e| ControllerError::SerializationError(e.to_string())) + } +} + +/// Update topic processor +pub struct UpdateTopicProcessor { + /// Metadata store + metadata: Arc, + + /// Raft controller + raft: Arc, +} + +impl UpdateTopicProcessor { + /// Create a new update topic processor + pub fn new(metadata: Arc, raft: Arc) -> Self { + Self { metadata, raft } + } + + /// Process update topic request + pub async fn process_request( + &self, + request: UpdateTopicRequest, + ) -> Result { + info!("Processing update topic request: {}", request.topic_name); + + // Check if we are the leader + if !self.raft.is_leader().await { + let leader = self.raft.get_leader().await; + return Ok(UpdateTopicResponse { + success: false, + error: Some(format!("Not leader, current leader: {:?}", leader)), + }); + } + + // Create updated config + let config = TopicConfig { + 
topic_name: request.topic_name.clone(), + read_queue_nums: request.topic_info.read_queue_nums, + write_queue_nums: request.topic_info.write_queue_nums, + perm: request.topic_info.perm, + topic_filter_type: 0, + topic_sys_flag: request.topic_info.topic_sys_flag, + order: false, + attributes: request.topic_info.metadata.clone(), + }; + + // Update topic + match self.metadata.topic_manager().update_topic(config).await { + Ok(()) => { + info!("Successfully updated topic: {}", request.topic_name); + Ok(UpdateTopicResponse { + success: true, + error: None, + }) + } + Err(e) => { + error!("Failed to update topic {}: {}", request.topic_name, e); + Ok(UpdateTopicResponse { + success: false, + error: Some(e.to_string()), + }) + } + } + } +} + +#[async_trait::async_trait] +impl RequestProcessor for UpdateTopicProcessor { + async fn process(&self, request: &[u8]) -> Result> { + let req: UpdateTopicRequest = serde_json::from_slice(request) + .map_err(|e| ControllerError::InvalidRequest(e.to_string()))?; + + let response = self.process_request(req).await?; + + serde_json::to_vec(&response) + .map_err(|e| ControllerError::SerializationError(e.to_string())) + } +} + +/// Delete topic processor +pub struct DeleteTopicProcessor { + /// Metadata store + metadata: Arc, + + /// Raft controller + raft: Arc, +} + +impl DeleteTopicProcessor { + /// Create a new delete topic processor + pub fn new(metadata: Arc, raft: Arc) -> Self { + Self { metadata, raft } + } + + /// Process delete topic request + pub async fn process_request( + &self, + request: DeleteTopicRequest, + ) -> Result { + info!("Processing delete topic request: {}", request.topic_name); + + // Check if we are the leader + if !self.raft.is_leader().await { + let leader = self.raft.get_leader().await; + return Ok(DeleteTopicResponse { + success: false, + error: Some(format!("Not leader, current leader: {:?}", leader)), + }); + } + + // Delete topic + match self + .metadata + .topic_manager() + 
.delete_topic(&request.topic_name) + .await + { + Ok(()) => { + info!("Successfully deleted topic: {}", request.topic_name); + Ok(DeleteTopicResponse { + success: true, + error: None, + }) + } + Err(e) => { + error!("Failed to delete topic {}: {}", request.topic_name, e); + Ok(DeleteTopicResponse { + success: false, + error: Some(e.to_string()), + }) + } + } + } +} + +#[async_trait::async_trait] +impl RequestProcessor for DeleteTopicProcessor { + async fn process(&self, request: &[u8]) -> Result> { + let req: DeleteTopicRequest = serde_json::from_slice(request) + .map_err(|e| ControllerError::InvalidRequest(e.to_string()))?; + + let response = self.process_request(req).await?; + + serde_json::to_vec(&response) + .map_err(|e| ControllerError::SerializationError(e.to_string())) + } +} diff --git a/rocketmq-controller/src/raft/mod.rs b/rocketmq-controller/src/raft/mod.rs new file mode 100644 index 00000000..06ef044c --- /dev/null +++ b/rocketmq-controller/src/raft/mod.rs @@ -0,0 +1,294 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +mod network; +mod node; +mod storage; +mod transport; + +use std::sync::Arc; +use std::time::Duration; + +pub use network::NetworkManager; +pub use node::RaftNode; +use raft::prelude::*; +pub use storage::MemStorage; +use tokio::sync::mpsc; +use tokio::sync::RwLock; +use tokio::time; +use tracing::debug; +use tracing::error; +use tracing::info; +pub use transport::MessageCodec; +pub use transport::PeerConnection; +pub use transport::RaftTransport; + +use crate::config::ControllerConfig; +use crate::error::ControllerError; +use crate::error::Result; + +/// Messages that can be sent to the Raft controller +#[derive(Debug)] +pub enum RaftMessage { + /// Propose a new entry + Propose { + data: Vec, + response: tokio::sync::oneshot::Sender>>, + }, + /// Process a Raft message from peer + Step { message: Message }, + /// Tick the Raft state machine + Tick, + /// Query current state (read-only) + Query { + data: Vec, + response: tokio::sync::oneshot::Sender>>, + }, + /// Shutdown the Raft controller + Shutdown, +} + +/// Raft controller - replaces Java DLedger +/// +/// This component provides distributed consensus using the Raft algorithm. 
+/// It manages: +/// - Leader election +/// - Log replication +/// - Snapshot management +/// - State machine application +/// - Network communication +pub struct RaftController { + /// Node ID + node_id: u64, + + /// Raft node + node: Arc>>, + + /// Network manager + network: Arc>>, + + /// Message sender + tx: mpsc::UnboundedSender, + + /// Configuration + config: Arc, +} + +impl RaftController { + /// Create a new Raft controller + pub async fn new(config: Arc) -> Result { + let node_id = config.node_id; + let (tx, rx) = mpsc::unbounded_channel(); + + let controller = Self { + node_id, + node: Arc::new(RwLock::new(None)), + network: Arc::new(RwLock::new(None)), + tx, + config: config.clone(), + }; + + // Initialize Raft node + let node = RaftNode::new(node_id, config.clone()).await?; + *controller.node.write().await = Some(node); + + // Initialize network manager + let (network_manager, incoming_rx) = NetworkManager::new(config.clone()); + *controller.network.write().await = Some(network_manager); + + // Start message processing loop + let node_clone = controller.node.clone(); + tokio::spawn(async move { + Self::message_loop(node_clone, rx).await; + }); + + // Start incoming message handler + let tx_clone = controller.tx.clone(); + tokio::spawn(async move { + Self::incoming_message_loop(incoming_rx, tx_clone).await; + }); + + Ok(controller) + } + + /// Start the Raft controller + pub async fn start(&self) -> Result<()> { + info!("Starting Raft controller for node {}", self.node_id); + + // Start network manager + if let Some(network) = self.network.write().await.as_mut() { + network.start().await?; + } + + // Start tick timer + let tx = self.tx.clone(); + tokio::spawn(async move { + let mut interval = time::interval(Duration::from_millis(100)); + loop { + interval.tick().await; + if tx.send(RaftMessage::Tick).is_err() { + break; + } + } + }); + + Ok(()) + } + + /// Shutdown the Raft controller + pub async fn shutdown(&self) -> Result<()> { + info!("Shutting 
down Raft controller for node {}", self.node_id); + + // Shutdown network manager + if let Some(network) = self.network.read().await.as_ref() { + network.shutdown().await?; + } + + self.tx + .send(RaftMessage::Shutdown) + .map_err(|_| ControllerError::Shutdown)?; + Ok(()) + } + + /// Propose a new entry (write operation) + pub async fn propose(&self, data: Vec) -> Result> { + let (tx, rx) = tokio::sync::oneshot::channel(); + self.tx + .send(RaftMessage::Propose { data, response: tx }) + .map_err(|_| ControllerError::Shutdown)?; + + rx.await + .map_err(|_| ControllerError::Internal("Response channel closed".to_string()))? + } + + /// Query current state (read-only operation) + pub async fn query(&self, data: Vec) -> Result> { + let (tx, rx) = tokio::sync::oneshot::channel(); + self.tx + .send(RaftMessage::Query { data, response: tx }) + .map_err(|_| ControllerError::Shutdown)?; + + rx.await + .map_err(|_| ControllerError::Internal("Response channel closed".to_string()))? + } + + /// Check if this node is the leader + pub async fn is_leader(&self) -> bool { + if let Some(node) = self.node.read().await.as_ref() { + node.is_leader().await + } else { + false + } + } + + /// Get the current leader ID + pub async fn get_leader(&self) -> Option { + if let Some(node) = self.node.read().await.as_ref() { + node.get_leader().await + } else { + None + } + } + + /// Handle a Raft message from a peer + pub async fn step(&self, message: Message) -> Result<()> { + self.tx + .send(RaftMessage::Step { message }) + .map_err(|_| ControllerError::Shutdown)?; + Ok(()) + } + + /// Message processing loop + async fn message_loop( + node: Arc>>, + mut rx: mpsc::UnboundedReceiver, + ) { + while let Some(msg) = rx.recv().await { + match msg { + RaftMessage::Propose { data, response } => { + let result = if let Some(n) = node.read().await.as_ref() { + n.propose(data).await + } else { + Err(ControllerError::Internal( + "Node not initialized".to_string(), + )) + }; + let _ = 
response.send(result); + } + RaftMessage::Step { message } => { + if let Some(n) = node.read().await.as_ref() { + if let Err(e) = n.step(message).await { + error!("Failed to step Raft: {}", e); + } + } + } + RaftMessage::Tick => { + if let Some(n) = node.read().await.as_ref() { + if let Err(e) = n.tick().await { + error!("Failed to tick Raft: {}", e); + } + } + } + RaftMessage::Query { data, response } => { + let result = if let Some(n) = node.read().await.as_ref() { + n.query(data).await + } else { + Err(ControllerError::Internal( + "Node not initialized".to_string(), + )) + }; + let _ = response.send(result); + } + RaftMessage::Shutdown => { + info!("Raft controller shutting down"); + break; + } + } + } + } + + /// Incoming message loop - handles messages from network + async fn incoming_message_loop( + mut incoming_rx: mpsc::UnboundedReceiver, + tx: mpsc::UnboundedSender, + ) { + info!("Starting incoming message loop"); + + while let Some(message) = incoming_rx.recv().await { + debug!( + "Received Raft message from network: {:?}", + message.get_msg_type() + ); + + if tx.send(RaftMessage::Step { message }).is_err() { + error!("Failed to forward incoming message to Raft"); + break; + } + } + + info!("Incoming message loop stopped"); + } +} + +#[cfg(test)] +mod tests { + #[tokio::test] + async fn test_raft_controller_creation() { + // Placeholder test - actual test would require full setup + assert!(true); + } +} diff --git a/rocketmq-controller/src/raft/network.rs b/rocketmq-controller/src/raft/network.rs new file mode 100644 index 00000000..a2083e0c --- /dev/null +++ b/rocketmq-controller/src/raft/network.rs @@ -0,0 +1,166 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::collections::HashMap; +use std::sync::Arc; + +use raft::prelude::Message; +use tokio::sync::mpsc; +use tracing::debug; +use tracing::error; +use tracing::info; + +use crate::config::ControllerConfig; +use crate::error::Result; +use crate::raft::RaftTransport; + +/// Network manager for Raft communication +/// +/// This component manages the network layer for Raft messages, +/// handling both sending and receiving messages between nodes. 
+pub struct NetworkManager { + /// Configuration + config: Arc, + + /// Transport layer + transport: Arc, + + /// Message receiver for outgoing messages + outgoing_rx: Option>, + + /// Message sender for incoming messages + incoming_tx: mpsc::UnboundedSender, + + /// Running state + running: Arc>, +} + +impl NetworkManager { + /// Create a new network manager + pub fn new(config: Arc) -> (Self, mpsc::UnboundedReceiver) { + // Build peer address map + let mut peer_addrs = HashMap::new(); + for peer in &config.raft_peers { + peer_addrs.insert(peer.id, peer.addr); + } + + // Create transport + let (transport, outgoing_rx, incoming_rx) = + RaftTransport::new(config.node_id, config.listen_addr, peer_addrs); + + let incoming_tx = transport.message_sender(); + + let manager = Self { + config, + transport: Arc::new(transport), + outgoing_rx: Some(outgoing_rx), + incoming_tx, + running: Arc::new(tokio::sync::RwLock::new(false)), + }; + + (manager, incoming_rx) + } + + /// Start the network manager + pub async fn start(&mut self) -> Result<()> { + let mut running = self.running.write().await; + if *running { + return Ok(()); + } + + info!("Starting network manager for node {}", self.config.node_id); + + // Start transport + self.transport.clone().start().await?; + + // Start outgoing message handler + if let Some(mut outgoing_rx) = self.outgoing_rx.take() { + let transport = self.transport.clone(); + let running_clone = self.running.clone(); + + tokio::spawn(async move { + info!("Starting outgoing message handler"); + + while let Some(msg) = outgoing_rx.recv().await { + if !*running_clone.read().await { + break; + } + + let to = msg.get_to(); + debug!( + "Sending message to peer {}, type: {:?}", + to, + msg.get_msg_type() + ); + + if let Err(e) = transport.send_to_peer(to, msg).await { + error!("Failed to send message to peer {}: {}", to, e); + } + } + + info!("Outgoing message handler stopped"); + }); + } + + *running = true; + info!("Network manager started successfully"); 
+ + Ok(()) + } + + /// Shutdown the network manager + pub async fn shutdown(&self) -> Result<()> { + let mut running = self.running.write().await; + if !*running { + return Ok(()); + } + + info!("Shutting down network manager"); + *running = false; + + Ok(()) + } + + /// Get the incoming message sender + pub fn incoming_sender(&self) -> mpsc::UnboundedSender { + self.incoming_tx.clone() + } + + /// Send a message to a peer + pub async fn send_message(&self, msg: Message) -> Result<()> { + self.transport.send_to_peer(msg.get_to(), msg).await + } + + /// Broadcast a message to all peers + pub async fn broadcast_message(&self, msg: Message) -> Result<()> { + self.transport.broadcast(msg).await + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::RaftPeer; + + #[tokio::test] + async fn test_network_manager_creation() { + let config = Arc::new(ControllerConfig::test_config()); + + let (manager, _rx) = NetworkManager::new(config); + assert!(!*manager.running.read().await); + } +} diff --git a/rocketmq-controller/src/raft/node.rs b/rocketmq-controller/src/raft/node.rs new file mode 100644 index 00000000..3010d979 --- /dev/null +++ b/rocketmq-controller/src/raft/node.rs @@ -0,0 +1,208 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::collections::HashMap; +use std::sync::Arc; + +use raft::prelude::*; +use raft::storage::MemStorage as RaftMemStorage; +use raft::StateRole; +use tokio::sync::RwLock; +use tracing::debug; +use tracing::info; + +use crate::config::ControllerConfig; +use crate::error::ControllerError; +use crate::error::Result; + +/// Type alias for proposal map to reduce complexity +type ProposalMap = HashMap, tokio::sync::oneshot::Sender>>>; + +/// Raft node wrapper +pub struct RaftNode { + /// Node ID + id: u64, + + /// Raft raw node + raw_node: Arc>>, + + /// Pending proposals + proposals: Arc>, +} + +impl RaftNode { + /// Create a new Raft node + pub async fn new(id: u64, config: Arc) -> Result { + // Create Raft configuration + let raft_config = Config { + id, + election_tick: (config.election_timeout_ms / 100) as usize, + heartbeat_tick: (config.heartbeat_interval_ms / 100) as usize, + max_size_per_msg: 1024 * 1024, + max_inflight_msgs: 256, + ..Default::default() + }; + raft_config + .validate() + .map_err(|e| ControllerError::ConfigError(format!("Invalid Raft config: {:?}", e)))?; + + // Create storage + let storage = RaftMemStorage::new(); + + // Initialize peers + let peers: Vec = config.raft_peers.iter().map(|p| p.id).collect(); + if !peers.is_empty() { + let mut snapshot = Snapshot::default(); + snapshot.mut_metadata().index = 0; + snapshot.mut_metadata().term = 0; + snapshot + .mut_metadata() + .mut_conf_state() + .voters + .clone_from(&peers); + + storage + .wl() + .apply_snapshot(snapshot) + .map_err(|e| ControllerError::Raft(format!("Failed to apply snapshot: {:?}", e)))?; + } + + // Create raw node + let raw_node = RawNode::new(&raft_config, storage, &slog_global::get_global()) + .map_err(|e| ControllerError::Raft(format!("Failed to create RawNode: {:?}", e)))?; + + info!("Created Raft node {} with peers: {:?}", id, peers); + + Ok(Self { + id, + 
raw_node: Arc::new(RwLock::new(raw_node)), + proposals: Arc::new(RwLock::new(HashMap::new())), + }) + } + + /// Propose a new entry + pub async fn propose(&self, data: Vec) -> Result> { + let mut raw_node = self.raw_node.write().await; + + // Check if we are the leader + if raw_node.raft.state != StateRole::Leader { + let leader = raw_node.raft.leader_id; + return Err(ControllerError::NotLeader { + leader_id: if leader == 0 { None } else { Some(leader) }, + }); + } + + // Propose the entry + raw_node + .propose(vec![], data.clone()) + .map_err(|e| ControllerError::Raft(format!("Failed to propose: {:?}", e)))?; + + // For now, return immediately + // In a real implementation, we would wait for the entry to be committed + Ok(data) + } + + /// Query current state (read-only) + pub async fn query(&self, _data: Vec) -> Result> { + let raw_node = self.raw_node.read().await; + + // Check if we are the leader + if raw_node.raft.state != StateRole::Leader { + let leader = raw_node.raft.leader_id; + return Err(ControllerError::NotLeader { + leader_id: if leader == 0 { None } else { Some(leader) }, + }); + } + + // For now, return empty response + // In a real implementation, we would query the state machine + Ok(vec![]) + } + + /// Step the Raft state machine with a message + pub async fn step(&self, message: Message) -> Result<()> { + let mut raw_node = self.raw_node.write().await; + raw_node + .step(message) + .map_err(|e| ControllerError::Raft(format!("Failed to step: {:?}", e)))?; + Ok(()) + } + + /// Tick the Raft state machine + pub async fn tick(&self) -> Result<()> { + let mut raw_node = self.raw_node.write().await; + raw_node.tick(); + + // Process ready + if raw_node.has_ready() { + let mut ready = raw_node.ready(); + + // Handle messages + if !ready.messages().is_empty() { + // In a real implementation, send these messages to peers + debug!("Need to send {} messages to peers", ready.messages().len()); + } + + // Handle committed entries + for entry in 
ready.take_committed_entries() { + if entry.data.is_empty() { + // Empty entry, from leadership transfer + continue; + } + // Apply to state machine + debug!("Applying entry: {:?}", entry); + } + + // Advance the Raft + let light_rd = raw_node.advance(ready); + if let Some(commit) = light_rd.commit_index() { + raw_node.mut_store().wl().commit_to(commit).ok(); + } + raw_node.advance_apply(); + } + + Ok(()) + } + + /// Check if this node is the leader + pub async fn is_leader(&self) -> bool { + let raw_node = self.raw_node.read().await; + raw_node.raft.state == StateRole::Leader + } + + /// Get the current leader ID + pub async fn get_leader(&self) -> Option { + let raw_node = self.raw_node.read().await; + let leader = raw_node.raft.leader_id; + if leader == 0 { + None + } else { + Some(leader) + } + } +} + +// Helper for slog logger +mod slog_global { + use slog::o; + use slog::Discard; + use slog::Logger; + + pub fn get_global() -> Logger { + Logger::root(Discard, o!()) + } +} diff --git a/rocketmq-controller/src/raft/storage.rs b/rocketmq-controller/src/raft/storage.rs new file mode 100644 index 00000000..9dad594e --- /dev/null +++ b/rocketmq-controller/src/raft/storage.rs @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/// Re-export MemStorage from raft-rs +/// +/// In a production implementation, this should be replaced with +/// a persistent storage implementation that writes to disk. +pub use raft::storage::MemStorage; + +// Future implementations should: +// - Persist entries to disk (e.g., using RocksDB or custom log file) +// - Support efficient snapshot creation and restoration +// - Handle concurrent reads and writes +// - Provide durability guarantees +// +// Example structure: +// ```ignore +// pub struct PersistentStorage { +// path: PathBuf, +// db: Arc, +// // ... other fields +// } +// +// impl Storage for PersistentStorage { +// // Implement Storage trait methods +// } +// ``` diff --git a/rocketmq-controller/src/raft/transport.rs b/rocketmq-controller/src/raft/transport.rs new file mode 100644 index 00000000..085183da --- /dev/null +++ b/rocketmq-controller/src/raft/transport.rs @@ -0,0 +1,370 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use std::collections::HashMap; +use std::net::SocketAddr; +use std::sync::Arc; + +use bytes::Bytes; +use bytes::BytesMut; +use protobuf::Message as ProtobufMessage; +use raft::eraftpb; +use raft::prelude::Message; +use tokio::io::AsyncReadExt; +use tokio::io::AsyncWriteExt; +use tokio::net::TcpListener; +use tokio::net::TcpStream; +use tokio::sync::mpsc; +use tokio::sync::RwLock; +use tracing::debug; +use tracing::error; +use tracing::info; +use tracing::warn; + +use crate::error::ControllerError; +use crate::error::Result; + +/// Message codec for Raft messages +pub struct MessageCodec; + +impl MessageCodec { + /// Encode a Raft message to bytes using protobuf + pub fn encode(msg: &Message) -> Result { + // Convert to protobuf message + let proto_msg: eraftpb::Message = msg.clone(); + + // Encode using protobuf v2 + let encoded = proto_msg + .write_to_bytes() + .map_err(|e| ControllerError::SerializationError(e.to_string()))?; + + // Length prefix (4 bytes) + message data + let len = encoded.len() as u32; + let mut result = BytesMut::with_capacity(4 + encoded.len()); + result.extend_from_slice(&len.to_be_bytes()); + result.extend_from_slice(&encoded); + + Ok(result.freeze()) + } + + /// Decode bytes to a Raft message using protobuf + pub async fn decode(stream: &mut TcpStream) -> Result { + // Read length prefix + let mut len_buf = [0u8; 4]; + stream + .read_exact(&mut len_buf) + .await + .map_err(|e| ControllerError::NetworkError(e.to_string()))?; + + let len = u32::from_be_bytes(len_buf) as usize; + + // Validate length + if len > 10 * 1024 * 1024 { + return Err(ControllerError::InvalidRequest(format!( + "Message too large: {} bytes", + len + ))); + } + + // Read message data + let mut buf = vec![0u8; len]; + stream + .read_exact(&mut buf) + .await + .map_err(|e| ControllerError::NetworkError(e.to_string()))?; + + // Deserialize using protobuf v2 + let proto_msg = eraftpb::Message::parse_from_bytes(&buf) + .map_err(|e| 
ControllerError::SerializationError(e.to_string()))?; + + // Convert to raft Message + Ok(proto_msg) + } +} + +/// Connection to a peer +pub struct PeerConnection { + /// Peer ID + peer_id: u64, + + /// Peer address + addr: SocketAddr, + + /// TCP stream + stream: Option, + + /// Send queue + tx: mpsc::UnboundedSender, + + /// Receive handler + rx: mpsc::UnboundedReceiver, +} + +impl PeerConnection { + /// Create a new peer connection + pub fn new(peer_id: u64, addr: SocketAddr) -> Self { + let (tx, rx) = mpsc::unbounded_channel(); + + Self { + peer_id, + addr, + stream: None, + tx, + rx, + } + } + + /// Connect to the peer + pub async fn connect(&mut self) -> Result<()> { + debug!("Connecting to peer {} at {}", self.peer_id, self.addr); + + match TcpStream::connect(self.addr).await { + Ok(stream) => { + info!( + "Successfully connected to peer {} at {}", + self.peer_id, self.addr + ); + self.stream = Some(stream); + Ok(()) + } + Err(e) => { + warn!( + "Failed to connect to peer {} at {}: {}", + self.peer_id, self.addr, e + ); + Err(ControllerError::NetworkError(e.to_string())) + } + } + } + + /// Send a message to the peer + pub async fn send(&mut self, msg: Message) -> Result<()> { + // Ensure we're connected + if self.stream.is_none() { + self.connect().await?; + } + + let stream = self + .stream + .as_mut() + .ok_or_else(|| ControllerError::NetworkError("Not connected".to_string()))?; + + // Encode message + let bytes = MessageCodec::encode(&msg)?; + + // Send + stream.write_all(&bytes).await.map_err(|e| { + error!("Failed to send message to peer {}: {}", self.peer_id, e); + self.stream = None; // Reset connection on error + ControllerError::NetworkError(e.to_string()) + })?; + + debug!( + "Sent message to peer {}, type: {:?}", + self.peer_id, + msg.get_msg_type() + ); + Ok(()) + } + + /// Receive a message from the peer + pub async fn receive(&mut self) -> Result { + let stream = self + .stream + .as_mut() + .ok_or_else(|| ControllerError::NetworkError("Not 
connected".to_string()))?; + + MessageCodec::decode(stream).await + } + + /// Get the sender channel + pub fn sender(&self) -> mpsc::UnboundedSender { + self.tx.clone() + } +} + +/// Network transport for Raft +pub struct RaftTransport { + /// Node ID + node_id: u64, + + /// Listen address + listen_addr: SocketAddr, + + /// Peer connections + peers: Arc>>>>, + + /// Message receiver from Raft + message_tx: mpsc::UnboundedSender, + + /// Incoming message sender to Raft + incoming_tx: mpsc::UnboundedSender, +} + +impl RaftTransport { + /// Create a new transport + pub fn new( + node_id: u64, + listen_addr: SocketAddr, + peer_addrs: HashMap, + ) -> ( + Self, + mpsc::UnboundedReceiver, + mpsc::UnboundedReceiver, + ) { + let (message_tx, message_rx) = mpsc::unbounded_channel(); + let (incoming_tx, incoming_rx) = mpsc::unbounded_channel(); + + let mut peers = HashMap::new(); + for (peer_id, addr) in peer_addrs { + if peer_id != node_id { + let conn = PeerConnection::new(peer_id, addr); + peers.insert(peer_id, Arc::new(RwLock::new(conn))); + } + } + + let transport = Self { + node_id, + listen_addr, + peers: Arc::new(RwLock::new(peers)), + message_tx, + incoming_tx, + }; + + (transport, message_rx, incoming_rx) + } + + /// Start the transport + pub async fn start(self: Arc) -> Result<()> { + info!("Starting Raft transport on {}", self.listen_addr); + + // Start listening for incoming connections + let self_clone = self.clone(); + tokio::spawn(async move { + if let Err(e) = self_clone.accept_loop().await { + error!("Accept loop error: {}", e); + } + }); + + // Start message sending loop + let self_clone = self.clone(); + tokio::spawn(async move { + if let Err(e) = self_clone.send_loop().await { + error!("Send loop error: {}", e); + } + }); + + info!("Raft transport started successfully"); + Ok(()) + } + + /// Accept incoming connections + async fn accept_loop(&self) -> Result<()> { + let listener = TcpListener::bind(self.listen_addr) + .await + .map_err(|e| 
ControllerError::NetworkError(e.to_string()))?; + + info!("Listening for Raft connections on {}", self.listen_addr); + + loop { + match listener.accept().await { + Ok((mut stream, addr)) => { + debug!("Accepted connection from {}", addr); + + let incoming_tx = self.incoming_tx.clone(); + tokio::spawn(async move { + loop { + match MessageCodec::decode(&mut stream).await { + Ok(msg) => { + debug!( + "Received message from {}: {:?}", + addr, + msg.get_msg_type() + ); + if incoming_tx.send(msg).is_err() { + warn!("Failed to forward incoming message"); + break; + } + } + Err(e) => { + error!("Failed to decode message from {}: {}", addr, e); + break; + } + } + } + }); + } + Err(e) => { + error!("Failed to accept connection: {}", e); + } + } + } + } + + /// Send messages to peers + async fn send_loop(&self) -> Result<()> { + // This will be implemented to actually send messages + // For now, it's a placeholder + Ok(()) + } + + /// Send a message to a specific peer + pub async fn send_to_peer(&self, peer_id: u64, msg: Message) -> Result<()> { + debug!("Sending message to peer {}", peer_id); + + let peers = self.peers.read().await; + let peer = peers + .get(&peer_id) + .ok_or_else(|| ControllerError::NetworkError(format!("Unknown peer: {}", peer_id)))?; + + let mut conn = peer.write().await; + conn.send(msg).await + } + + /// Broadcast a message to all peers + pub async fn broadcast(&self, msg: Message) -> Result<()> { + debug!("Broadcasting message to all peers"); + + let peers = self.peers.read().await; + for (peer_id, peer) in peers.iter() { + let mut conn = peer.write().await; + if let Err(e) = conn.send(msg.clone()).await { + warn!("Failed to send message to peer {}: {}", peer_id, e); + } + } + + Ok(()) + } + + /// Get the message sender + pub fn message_sender(&self) -> mpsc::UnboundedSender { + self.message_tx.clone() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_peer_connection_creation() { + let addr: SocketAddr = 
"127.0.0.1:9876".parse().unwrap(); + let conn = PeerConnection::new(1, addr); + assert_eq!(conn.peer_id, 1); + assert_eq!(conn.addr, addr); + } +} diff --git a/rocketmq-controller/src/rpc/codec.rs b/rocketmq-controller/src/rpc/codec.rs new file mode 100644 index 00000000..c11050db --- /dev/null +++ b/rocketmq-controller/src/rpc/codec.rs @@ -0,0 +1,282 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use bytes::Buf; +use bytes::BufMut; +use bytes::BytesMut; +use serde::Deserialize; +use serde::Serialize; +use tokio_util::codec::Decoder; +use tokio_util::codec::Encoder; +use tracing::debug; +use tracing::trace; + +use crate::error::ControllerError; +use crate::error::Result; +use crate::processor::RequestType; + +/// RPC request message +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RpcRequest { + /// Request ID for correlation + pub request_id: u64, + + /// Request type + pub request_type: RequestType, + + /// Request payload (JSON-encoded) + pub payload: Vec, +} + +/// RPC response message +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RpcResponse { + /// Request ID for correlation + pub request_id: u64, + + /// Success flag + pub success: bool, + + /// Error message if failed + pub error: Option, + + /// Response payload (JSON-encoded) + pub payload: Vec, +} + +/// RPC message codec +/// +/// Protocol format: +/// ```text +/// +--------+--------+--------+--------+ +/// | Length (4 bytes, big-endian) | +/// +--------+--------+--------+--------+ +/// | JSON-encoded message | +/// | ... 
| +/// +-----------------------------------+ +/// ``` +pub struct RpcCodec; + +impl RpcCodec { + /// Maximum frame size (16MB) + const MAX_FRAME_SIZE: usize = 16 * 1024 * 1024; + + /// Create a new RPC codec + pub fn new() -> Self { + Self + } +} + +impl Default for RpcCodec { + fn default() -> Self { + Self::new() + } +} + +impl Decoder for RpcCodec { + type Item = RpcRequest; + type Error = ControllerError; + + fn decode(&mut self, src: &mut BytesMut) -> Result> { + // Check if we have enough bytes for the length prefix + if src.len() < 4 { + trace!("Not enough bytes for length prefix: {}", src.len()); + return Ok(None); + } + + // Read the length prefix + let mut length_bytes = [0u8; 4]; + length_bytes.copy_from_slice(&src[..4]); + let length = u32::from_be_bytes(length_bytes) as usize; + + trace!("RPC request length: {}", length); + + // Validate length + if length > Self::MAX_FRAME_SIZE { + return Err(ControllerError::InvalidRequest(format!( + "Frame size {} exceeds maximum {}", + length, + Self::MAX_FRAME_SIZE + ))); + } + + // Check if we have the complete frame + if src.len() < 4 + length { + trace!("Incomplete frame: have {}, need {}", src.len(), 4 + length); + // Reserve space for the rest of the frame + src.reserve(4 + length - src.len()); + return Ok(None); + } + + // Skip the length prefix + src.advance(4); + + // Read the frame data + let data = src.split_to(length); + + // Deserialize the request + let request: RpcRequest = serde_json::from_slice(&data) + .map_err(|e| ControllerError::InvalidRequest(e.to_string()))?; + + debug!( + "Decoded RPC request: id={}, type={:?}", + request.request_id, request.request_type + ); + + Ok(Some(request)) + } +} + +impl Encoder for RpcCodec { + type Error = ControllerError; + + fn encode(&mut self, item: RpcResponse, dst: &mut BytesMut) -> Result<()> { + debug!( + "Encoding RPC response: id={}, success={}", + item.request_id, item.success + ); + + // Serialize the response + let data = serde_json::to_vec(&item) + 
.map_err(|e| ControllerError::SerializationError(e.to_string()))?; + + // Check size + if data.len() > Self::MAX_FRAME_SIZE { + return Err(ControllerError::SerializationError(format!( + "Response size {} exceeds maximum {}", + data.len(), + Self::MAX_FRAME_SIZE + ))); + } + + // Write length prefix + let length = data.len() as u32; + dst.reserve(4 + data.len()); + dst.put_u32(length); + + // Write data + dst.put_slice(&data); + + trace!("Encoded RPC response: {} bytes", 4 + data.len()); + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_rpc_request_serialization() { + let request = RpcRequest { + request_id: 123, + request_type: RequestType::RegisterBroker, + payload: b"test payload".to_vec(), + }; + + let serialized = serde_json::to_vec(&request).unwrap(); + let deserialized: RpcRequest = serde_json::from_slice(&serialized).unwrap(); + + assert_eq!(deserialized.request_id, request.request_id); + assert_eq!(deserialized.request_type, request.request_type); + assert_eq!(deserialized.payload, request.payload); + } + + #[test] + fn test_rpc_response_serialization() { + let response = RpcResponse { + request_id: 456, + success: true, + error: None, + payload: b"response payload".to_vec(), + }; + + let serialized = serde_json::to_vec(&response).unwrap(); + let deserialized: RpcResponse = serde_json::from_slice(&serialized).unwrap(); + + assert_eq!(deserialized.request_id, response.request_id); + assert_eq!(deserialized.success, response.success); + assert_eq!(deserialized.error, response.error); + assert_eq!(deserialized.payload, response.payload); + } + + #[test] + fn test_codec_decode_incomplete() { + let mut codec = RpcCodec::new(); + let mut buf = BytesMut::new(); + + // Write only 2 bytes of length prefix + buf.put_u16(0x00); + + let result = codec.decode(&mut buf); + assert!(result.is_ok()); + assert!(result.unwrap().is_none()); + } + + #[test] + fn test_codec_encode_decode() { + let mut codec = RpcCodec::new(); + + // Create a 
request + let request = RpcRequest { + request_id: 789, + request_type: RequestType::BrokerHeartbeat, + payload: b"heartbeat data".to_vec(), + }; + + // Serialize manually to get the data + let request_data = serde_json::to_vec(&request).unwrap(); + + // Create a buffer with length prefix + data + let mut encode_buf = BytesMut::new(); + encode_buf.put_u32(request_data.len() as u32); + encode_buf.put_slice(&request_data); + + // Decode + let decoded = codec.decode(&mut encode_buf).unwrap(); + assert!(decoded.is_some()); + + let decoded_request = decoded.unwrap(); + assert_eq!(decoded_request.request_id, request.request_id); + assert_eq!(decoded_request.request_type, request.request_type); + assert_eq!(decoded_request.payload, request.payload); + } + + #[test] + fn test_codec_encode_response() { + let mut codec = RpcCodec::new(); + let mut buf = BytesMut::new(); + + let response = RpcResponse { + request_id: 999, + success: true, + error: None, + payload: b"success response".to_vec(), + }; + + let result = codec.encode(response.clone(), &mut buf); + assert!(result.is_ok()); + + // Check that length prefix is present + assert!(buf.len() >= 4); + + // Read length + let length = u32::from_be_bytes([buf[0], buf[1], buf[2], buf[3]]) as usize; + assert_eq!(buf.len(), 4 + length); + } +} diff --git a/rocketmq-controller/src/rpc/mod.rs b/rocketmq-controller/src/rpc/mod.rs new file mode 100644 index 00000000..d0ced905 --- /dev/null +++ b/rocketmq-controller/src/rpc/mod.rs @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! RPC module for Controller +//! +//! This module provides the RPC server and codec for handling +//! requests from brokers and clients. + +pub mod codec; +pub mod server; + +pub use codec::RpcCodec; +pub use codec::RpcRequest; +pub use codec::RpcResponse; +pub use server::RpcServer; diff --git a/rocketmq-controller/src/rpc/server.rs b/rocketmq-controller/src/rpc/server.rs new file mode 100644 index 00000000..b8d12a94 --- /dev/null +++ b/rocketmq-controller/src/rpc/server.rs @@ -0,0 +1,257 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use std::net::SocketAddr; +use std::sync::Arc; + +use futures::stream::StreamExt; +use futures::SinkExt; +use tokio::net::TcpListener; +use tokio::net::TcpStream; +use tokio::sync::RwLock; +use tokio_util::codec::Framed; +use tracing::debug; +use tracing::error; +use tracing::info; +use tracing::warn; + +use crate::error::Result; +use crate::processor::ProcessorManager; +use crate::rpc::codec::RpcCodec; +use crate::rpc::codec::RpcRequest; +use crate::rpc::codec::RpcResponse; + +/// RPC server +/// +/// Handles incoming TCP connections from brokers and clients, +/// decodes RPC requests, routes them to appropriate processors, +/// and sends back responses. +pub struct RpcServer { + /// Listen address + listen_addr: SocketAddr, + + /// Processor manager + processor_manager: Arc, + + /// Server state + state: Arc>, +} + +/// Server state +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum ServerState { + /// Server is not started + Stopped, + + /// Server is running + Running, + + /// Server is shutting down + ShuttingDown, +} + +impl RpcServer { + /// Create a new RPC server + pub fn new(listen_addr: SocketAddr, processor_manager: Arc) -> Self { + Self { + listen_addr, + processor_manager, + state: Arc::new(RwLock::new(ServerState::Stopped)), + } + } + + /// Start the RPC server + pub async fn start(&self) -> Result<()> { + // Check state + { + let mut state = self.state.write().await; + if *state != ServerState::Stopped { + warn!("RPC server already started"); + return Ok(()); + } + *state = ServerState::Running; + } + + info!("Starting RPC server on {}", self.listen_addr); + + // Bind to the address + let listener = TcpListener::bind(self.listen_addr).await?; + info!("RPC server listening on {}", self.listen_addr); + + // Clone Arc for the task + let processor_manager = self.processor_manager.clone(); + let state = self.state.clone(); + + // Spawn accept loop + tokio::spawn(async move { + loop { + // Check if we should stop + { + let current_state = 
state.read().await; + if *current_state == ServerState::ShuttingDown { + info!("RPC server accept loop stopping"); + break; + } + } + + // Accept new connection + match listener.accept().await { + Ok((stream, addr)) => { + debug!("Accepted connection from {}", addr); + + // Spawn handler for this connection + let processor_manager = processor_manager.clone(); + tokio::spawn(async move { + if let Err(e) = + Self::handle_connection(stream, addr, processor_manager).await + { + error!("Error handling connection from {}: {}", addr, e); + } + }); + } + Err(e) => { + error!("Failed to accept connection: {}", e); + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + } + } + } + }); + + Ok(()) + } + + /// Handle a single connection + async fn handle_connection( + stream: TcpStream, + addr: SocketAddr, + processor_manager: Arc, + ) -> Result<()> { + info!("Handling connection from {}", addr); + + // Create framed stream with codec + let mut framed = Framed::new(stream, RpcCodec::new()); + + // Process requests + while let Some(result) = framed.next().await { + match result { + Ok(request) => { + debug!( + "Received request from {}: id={}, type={:?}", + addr, request.request_id, request.request_type + ); + + // Process the request + let response = Self::process_request(request, &processor_manager).await; + + // Send response + if let Err(e) = framed.send(response).await { + error!("Failed to send response to {}: {}", addr, e); + break; + } + } + Err(e) => { + error!("Failed to decode request from {}: {}", addr, e); + break; + } + } + } + + info!("Connection from {} closed", addr); + Ok(()) + } + + /// Process a single request + async fn process_request( + request: RpcRequest, + processor_manager: &Arc, + ) -> RpcResponse { + debug!( + "Processing request: id={}, type={:?}", + request.request_id, request.request_type + ); + + // Process the request + match processor_manager + .process_request(request.request_type.clone(), &request.payload) + .await + { + 
Ok(response_data) => { + debug!("Request {} processed successfully", request.request_id); + RpcResponse { + request_id: request.request_id, + success: true, + error: None, + payload: response_data, + } + } + Err(e) => { + error!("Failed to process request {}: {}", request.request_id, e); + RpcResponse { + request_id: request.request_id, + success: false, + error: Some(e.to_string()), + payload: Vec::new(), + } + } + } + } + + /// Shutdown the RPC server + pub async fn shutdown(&self) -> Result<()> { + info!("Shutting down RPC server"); + + // Update state + { + let mut state = self.state.write().await; + *state = ServerState::ShuttingDown; + } + + // Wait a bit for accept loop to stop + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + + // Update state + { + let mut state = self.state.write().await; + *state = ServerState::Stopped; + } + + info!("RPC server stopped"); + Ok(()) + } + + /// Check if server is running + pub async fn is_running(&self) -> bool { + let state = self.state.read().await; + *state == ServerState::Running + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_rpc_server_state() { + // Test that we can work with server states + let state = ServerState::Stopped; + assert_eq!(state, ServerState::Stopped); + assert_ne!(state, ServerState::Running); + + // Test codec creation + let _codec = RpcCodec::new(); + } +} diff --git a/rocketmq-controller/src/storage/file_backend.rs b/rocketmq-controller/src/storage/file_backend.rs new file mode 100644 index 00000000..a1d3a1bf --- /dev/null +++ b/rocketmq-controller/src/storage/file_backend.rs @@ -0,0 +1,411 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::collections::HashMap; +use std::path::PathBuf; +use std::sync::Arc; + +use async_trait::async_trait; +use parking_lot::RwLock; +use tokio::fs; +use tokio::io::AsyncReadExt; +use tokio::io::AsyncWriteExt; +use tracing::debug; +use tracing::info; +use tracing::warn; + +use crate::error::ControllerError; +use crate::error::Result; +use crate::storage::StorageBackend; +use crate::storage::StorageStats; + +/// File-based storage backend +/// +/// Provides persistent storage using individual files for each key. +/// This is simpler than RocksDB but less performant for large datasets. +/// +/// File structure: +/// ```text +/// data_dir/ +/// ├── metadata.json (index of all keys) +/// └── data/ +/// ├── .dat +/// ├── .dat +/// └── ... 
+/// ``` +pub struct FileBackend { + /// Data directory path + path: PathBuf, + + /// In-memory index: key -> file path + index: Arc>>, +} + +impl FileBackend { + /// Create a new file-based backend + pub async fn new(path: PathBuf) -> Result { + info!("Opening file-based storage at {:?}", path); + + // Create directories + fs::create_dir_all(&path).await.map_err(|e| { + ControllerError::StorageError(format!("Failed to create directory: {}", e)) + })?; + + let data_dir = path.join("data"); + fs::create_dir_all(&data_dir).await.map_err(|e| { + ControllerError::StorageError(format!("Failed to create data directory: {}", e)) + })?; + + let backend = Self { + path, + index: Arc::new(RwLock::new(HashMap::new())), + }; + + // Load existing index + backend.load_index().await?; + + info!("File-based storage opened successfully"); + + Ok(backend) + } + + /// Get the storage path + pub fn path(&self) -> &PathBuf { + &self.path + } + + /// Load index from metadata file + async fn load_index(&self) -> Result<()> { + let metadata_path = self.path.join("metadata.json"); + + if !metadata_path.exists() { + info!("No existing index found, starting fresh"); + return Ok(()); + } + + let content = fs::read(&metadata_path).await.map_err(|e| { + ControllerError::StorageError(format!("Failed to read metadata: {}", e)) + })?; + + let loaded_index: HashMap = + serde_json::from_slice(&content).map_err(|e| { + ControllerError::SerializationError(format!("Failed to parse metadata: {}", e)) + })?; + + *self.index.write() = loaded_index; + + info!("Loaded index with {} keys", self.index.read().len()); + + Ok(()) + } + + /// Save index to metadata file + async fn save_index(&self) -> Result<()> { + let metadata_path = self.path.join("metadata.json"); + + let index = self.index.read().clone(); + let content = serde_json::to_vec_pretty(&index).map_err(|e| { + ControllerError::SerializationError(format!("Failed to serialize metadata: {}", e)) + })?; + + fs::write(&metadata_path, 
content).await.map_err(|e| { + ControllerError::StorageError(format!("Failed to write metadata: {}", e)) + })?; + + Ok(()) + } + + /// Get file path for a key + fn get_file_path(&self, key: &str) -> PathBuf { + // Use hash to avoid file system issues with special characters + let hash = Self::hash_key(key); + self.path.join("data").join(format!("{}.dat", hash)) + } + + /// Hash a key to generate a filename + fn hash_key(key: &str) -> String { + // Simple hash function - use the key itself if safe, otherwise hash it + if key + .chars() + .all(|c| c.is_alphanumeric() || c == '_' || c == '-') + { + key.to_string() + } else { + // Use a simple hash for keys with special characters + format!("{:x}", Self::simple_hash(key)) + } + } + + /// Simple hash function + fn simple_hash(s: &str) -> u64 { + let mut hash = 0u64; + for byte in s.bytes() { + hash = hash.wrapping_mul(31).wrapping_add(byte as u64); + } + hash + } +} + +#[async_trait] +impl StorageBackend for FileBackend { + async fn put(&self, key: &str, value: &[u8]) -> Result<()> { + debug!("File put: key={}, size={}", key, value.len()); + + let file_path = self.get_file_path(key); + + // Write data to file + let mut file = fs::File::create(&file_path) + .await + .map_err(|e| ControllerError::StorageError(format!("Failed to create file: {}", e)))?; + + file.write_all(value) + .await + .map_err(|e| ControllerError::StorageError(format!("Failed to write file: {}", e)))?; + + file.sync_all() + .await + .map_err(|e| ControllerError::StorageError(format!("Failed to sync file: {}", e)))?; + + // Update index + self.index.write().insert(key.to_string(), file_path); + + // Save index periodically (every 10 operations) + if self.index.read().len() % 10 == 0 { + self.save_index().await?; + } + + Ok(()) + } + + async fn get(&self, key: &str) -> Result>> { + debug!("File get: key={}", key); + + let file_path = match self.index.read().get(key) { + Some(path) => path.clone(), + None => return Ok(None), + }; + + // Check if file 
exists + if !file_path.exists() { + warn!("File not found for key: {}", key); + self.index.write().remove(key); + return Ok(None); + } + + // Read file + let mut file = fs::File::open(&file_path) + .await + .map_err(|e| ControllerError::StorageError(format!("Failed to open file: {}", e)))?; + + let mut buffer = Vec::new(); + file.read_to_end(&mut buffer) + .await + .map_err(|e| ControllerError::StorageError(format!("Failed to read file: {}", e)))?; + + Ok(Some(buffer)) + } + + async fn delete(&self, key: &str) -> Result<()> { + debug!("File delete: key={}", key); + + let file_path = match self.index.write().remove(key) { + Some(path) => path, + None => return Ok(()), // Key doesn't exist + }; + + // Delete file + if file_path.exists() { + fs::remove_file(&file_path).await.map_err(|e| { + ControllerError::StorageError(format!("Failed to delete file: {}", e)) + })?; + } + + // Save index + self.save_index().await?; + + Ok(()) + } + + async fn list_keys(&self, prefix: &str) -> Result> { + debug!("File list_keys: prefix={}", prefix); + + let keys: Vec = self + .index + .read() + .keys() + .filter(|k| k.starts_with(prefix)) + .cloned() + .collect(); + + Ok(keys) + } + + async fn batch_put(&self, items: Vec<(String, Vec)>) -> Result<()> { + debug!("File batch_put: {} items", items.len()); + + for (key, value) in items { + self.put(&key, &value).await?; + } + + // Save index after batch + self.save_index().await?; + + Ok(()) + } + + async fn batch_delete(&self, keys: Vec) -> Result<()> { + debug!("File batch_delete: {} keys", keys.len()); + + for key in keys { + self.delete(&key).await?; + } + + Ok(()) + } + + async fn exists(&self, key: &str) -> Result { + debug!("File exists: key={}", key); + Ok(self.index.read().contains_key(key)) + } + + async fn clear(&self) -> Result<()> { + info!("File clear: removing all data"); + + // Delete all files + let data_dir = self.path.join("data"); + if data_dir.exists() { + fs::remove_dir_all(&data_dir).await.map_err(|e| { + 
ControllerError::StorageError(format!("Failed to clear data: {}", e)) + })?; + + fs::create_dir_all(&data_dir).await.map_err(|e| { + ControllerError::StorageError(format!("Failed to recreate data directory: {}", e)) + })?; + } + + // Clear index + self.index.write().clear(); + self.save_index().await?; + + Ok(()) + } + + async fn sync(&self) -> Result<()> { + debug!("File sync"); + self.save_index().await + } + + async fn stats(&self) -> Result { + debug!("File stats"); + + // Clone the paths to avoid holding the lock across await + let paths: Vec = { + let index = self.index.read(); + index.values().cloned().collect() + }; + + let key_count = paths.len(); + + // Calculate total size + let mut total_size = 0u64; + for path in paths { + if path.exists() { + if let Ok(metadata) = fs::metadata(&path).await { + total_size += metadata.len(); + } + } + } + + Ok(StorageStats { + key_count, + total_size, + backend_info: format!("File-based storage at {:?}", self.path), + }) + } +} + +#[cfg(test)] +mod tests { + use tempfile::TempDir; + + use super::*; + + #[tokio::test] + async fn test_file_backend() { + let temp_dir = TempDir::new().unwrap(); + let storage_path = temp_dir.path().join("file_storage"); + + let backend = FileBackend::new(storage_path).await.unwrap(); + + // Test put and get + backend.put("test_key", b"test_value").await.unwrap(); + let value = backend.get("test_key").await.unwrap(); + assert_eq!(value, Some(b"test_value".to_vec())); + + // Test exists + assert!(backend.exists("test_key").await.unwrap()); + assert!(!backend.exists("nonexistent").await.unwrap()); + + // Test delete + backend.delete("test_key").await.unwrap(); + assert!(!backend.exists("test_key").await.unwrap()); + + // Test batch operations + let items = vec![ + ("batch_1".to_string(), b"value1".to_vec()), + ("batch_2".to_string(), b"value2".to_vec()), + ]; + backend.batch_put(items).await.unwrap(); + + assert!(backend.exists("batch_1").await.unwrap()); + 
assert!(backend.exists("batch_2").await.unwrap()); + + // Test list_keys + backend.put("prefix_1", b"value1").await.unwrap(); + backend.put("prefix_2", b"value2").await.unwrap(); + + let keys = backend.list_keys("prefix_").await.unwrap(); + assert_eq!(keys.len(), 2); + + // Test stats + let stats = backend.stats().await.unwrap(); + assert!(stats.key_count >= 4); + + // Test sync + backend.sync().await.unwrap(); + } + + #[tokio::test] + async fn test_file_backend_persistence() { + let temp_dir = TempDir::new().unwrap(); + let storage_path = temp_dir.path().join("persistent_storage"); + + // First session: write data + { + let backend = FileBackend::new(storage_path.clone()).await.unwrap(); + backend.put("persist_key", b"persist_value").await.unwrap(); + backend.sync().await.unwrap(); + } + + // Second session: read data + { + let backend = FileBackend::new(storage_path).await.unwrap(); + let value = backend.get("persist_key").await.unwrap(); + assert_eq!(value, Some(b"persist_value".to_vec())); + } + } +} diff --git a/rocketmq-controller/src/storage/mod.rs b/rocketmq-controller/src/storage/mod.rs new file mode 100644 index 00000000..2cc57ccc --- /dev/null +++ b/rocketmq-controller/src/storage/mod.rs @@ -0,0 +1,309 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! Storage backend abstraction +//! +//! This module provides a trait-based abstraction for different storage backends, +//! allowing the controller to use either RocksDB or file-based storage. + +#[cfg(feature = "storage-rocksdb")] +pub mod rocksdb_backend; + +#[cfg(feature = "storage-file")] +pub mod file_backend; + +use std::path::PathBuf; + +use async_trait::async_trait; +use serde::de::DeserializeOwned; +use serde::Serialize; + +use crate::error::Result; + +/// Storage backend configuration +#[derive(Debug, Clone)] +pub enum StorageConfig { + /// RocksDB storage + #[cfg(feature = "storage-rocksdb")] + RocksDB { + /// Database path + path: PathBuf, + }, + + /// File-based storage + #[cfg(feature = "storage-file")] + File { + /// Data directory path + path: PathBuf, + }, + + /// In-memory storage (for testing) + Memory, +} + +/// Storage backend trait +/// +/// This trait abstracts over different storage implementations, +/// providing a unified interface for storing and retrieving data. +#[async_trait] +pub trait StorageBackend: Send + Sync { + /// Put a key-value pair + async fn put(&self, key: &str, value: &[u8]) -> Result<()>; + + /// Get a value by key + async fn get(&self, key: &str) -> Result>>; + + /// Delete a key + async fn delete(&self, key: &str) -> Result<()>; + + /// List all keys with a given prefix + async fn list_keys(&self, prefix: &str) -> Result>; + + /// Batch put multiple key-value pairs + async fn batch_put(&self, items: Vec<(String, Vec)>) -> Result<()>; + + /// Batch delete multiple keys + async fn batch_delete(&self, keys: Vec) -> Result<()>; + + /// Check if a key exists + async fn exists(&self, key: &str) -> Result; + + /// Clear all data (use with caution!) 
+ async fn clear(&self) -> Result<()>; + + /// Sync data to disk + async fn sync(&self) -> Result<()>; + + /// Get storage statistics + async fn stats(&self) -> Result; +} + +/// Storage statistics +#[derive(Debug, Clone, Default)] +pub struct StorageStats { + /// Number of keys + pub key_count: usize, + + /// Total size in bytes + pub total_size: u64, + + /// Backend-specific info + pub backend_info: String, +} + +/// Helper methods for storing/retrieving typed data +#[async_trait] +pub trait StorageBackendExt: StorageBackend { + /// Put a serializable value + async fn put_json(&self, key: &str, value: &T) -> Result<()> { + let data = serde_json::to_vec(value) + .map_err(|e| crate::error::ControllerError::SerializationError(e.to_string()))?; + self.put(key, &data).await + } + + /// Get and deserialize a value + async fn get_json(&self, key: &str) -> Result> { + match self.get(key).await? { + Some(data) => { + let value = serde_json::from_slice(&data).map_err(|e| { + crate::error::ControllerError::SerializationError(e.to_string()) + })?; + Ok(Some(value)) + } + None => Ok(None), + } + } + + /// List all values with a given prefix + async fn list_json(&self, prefix: &str) -> Result> { + let keys = self.list_keys(prefix).await?; + let mut values = Vec::new(); + + for key in keys { + if let Some(data) = self.get(&key).await? 
{ + let value: T = serde_json::from_slice(&data).map_err(|e| { + crate::error::ControllerError::SerializationError(e.to_string()) + })?; + values.push(value); + } + } + + Ok(values) + } +} + +// Blanket implementation for all StorageBackend implementors +impl StorageBackendExt for T {} + +/// Create a storage backend based on configuration +pub async fn create_storage(config: StorageConfig) -> Result> { + match config { + #[cfg(feature = "storage-rocksdb")] + StorageConfig::RocksDB { path } => { + let backend = rocksdb_backend::RocksDBBackend::new(path).await?; + Ok(Box::new(backend)) + } + + #[cfg(feature = "storage-file")] + StorageConfig::File { path } => { + let backend = file_backend::FileBackend::new(path).await?; + Ok(Box::new(backend)) + } + + StorageConfig::Memory => { + // For testing, use a simple in-memory implementation + use std::collections::HashMap; + use std::sync::Arc; + + use parking_lot::RwLock; + + #[derive(Clone)] + struct MemoryBackend { + data: Arc>>>, + } + + #[async_trait] + impl StorageBackend for MemoryBackend { + async fn put(&self, key: &str, value: &[u8]) -> Result<()> { + self.data.write().insert(key.to_string(), value.to_vec()); + Ok(()) + } + + async fn get(&self, key: &str) -> Result>> { + Ok(self.data.read().get(key).cloned()) + } + + async fn delete(&self, key: &str) -> Result<()> { + self.data.write().remove(key); + Ok(()) + } + + async fn list_keys(&self, prefix: &str) -> Result> { + Ok(self + .data + .read() + .keys() + .filter(|k| k.starts_with(prefix)) + .cloned() + .collect()) + } + + async fn batch_put(&self, items: Vec<(String, Vec)>) -> Result<()> { + let mut data = self.data.write(); + for (key, value) in items { + data.insert(key, value); + } + Ok(()) + } + + async fn batch_delete(&self, keys: Vec) -> Result<()> { + let mut data = self.data.write(); + for key in keys { + data.remove(&key); + } + Ok(()) + } + + async fn exists(&self, key: &str) -> Result { + Ok(self.data.read().contains_key(key)) + } + + async fn 
clear(&self) -> Result<()> { + self.data.write().clear(); + Ok(()) + } + + async fn sync(&self) -> Result<()> { + Ok(()) + } + + async fn stats(&self) -> Result { + let data = self.data.read(); + let total_size: u64 = data.values().map(|v| v.len() as u64).sum(); + Ok(StorageStats { + key_count: data.len(), + total_size, + backend_info: "Memory".to_string(), + }) + } + } + + Ok(Box::new(MemoryBackend { + data: Arc::new(RwLock::new(HashMap::new())), + })) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_memory_backend() { + let backend = create_storage(StorageConfig::Memory).await.unwrap(); + + // Test put and get + backend.put("test_key", b"test_value").await.unwrap(); + let value = backend.get("test_key").await.unwrap(); + assert_eq!(value, Some(b"test_value".to_vec())); + + // Test exists + assert!(backend.exists("test_key").await.unwrap()); + assert!(!backend.exists("nonexistent").await.unwrap()); + + // Test delete + backend.delete("test_key").await.unwrap(); + assert!(!backend.exists("test_key").await.unwrap()); + + // Test list_keys + backend.put("prefix_1", b"value1").await.unwrap(); + backend.put("prefix_2", b"value2").await.unwrap(); + backend.put("other_1", b"value3").await.unwrap(); + + let keys = backend.list_keys("prefix_").await.unwrap(); + assert_eq!(keys.len(), 2); + + // Test stats + let stats = backend.stats().await.unwrap(); + assert_eq!(stats.key_count, 3); + } + + #[tokio::test] + async fn test_json_operations() { + use serde::Deserialize; + use serde::Serialize; + + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] + struct TestData { + id: u64, + name: String, + } + + let backend = create_storage(StorageConfig::Memory).await.unwrap(); + + let data = TestData { + id: 123, + name: "test".to_string(), + }; + + // Test put_json and get_json + backend.put_json("test_json", &data).await.unwrap(); + let retrieved: Option = backend.get_json("test_json").await.unwrap(); + assert_eq!(retrieved, 
Some(data)); + } +} diff --git a/rocketmq-controller/src/storage/rocksdb_backend.rs b/rocketmq-controller/src/storage/rocksdb_backend.rs new file mode 100644 index 00000000..a4f48c90 --- /dev/null +++ b/rocketmq-controller/src/storage/rocksdb_backend.rs @@ -0,0 +1,375 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use std::path::PathBuf; +use std::sync::Arc; + +use async_trait::async_trait; +use rocksdb::Options; +use rocksdb::WriteBatch; +use rocksdb::DB; +use tracing::debug; +use tracing::info; + +use crate::error::ControllerError; +use crate::error::Result; +use crate::storage::StorageBackend; +use crate::storage::StorageStats; + +/// RocksDB storage backend +/// +/// Provides persistent storage using RocksDB, a high-performance +/// embedded database based on LevelDB. 
+pub struct RocksDBBackend { + /// RocksDB instance + db: Arc, + + /// Database path + path: PathBuf, +} + +impl RocksDBBackend { + /// Create a new RocksDB backend + pub async fn new(path: PathBuf) -> Result { + info!("Opening RocksDB at {:?}", path); + + // Create directory if it doesn't exist + if let Some(parent) = path.parent() { + tokio::fs::create_dir_all(parent).await.map_err(|e| { + ControllerError::StorageError(format!("Failed to create directory: {}", e)) + })?; + } + + // Configure RocksDB options + let mut opts = Options::default(); + opts.create_if_missing(true); + opts.create_missing_column_families(true); + + // Performance tuning + opts.set_max_open_files(1000); + opts.set_use_fsync(false); + opts.set_bytes_per_sync(1024 * 1024); + opts.set_level_compaction_dynamic_level_bytes(true); + opts.set_max_background_jobs(4); + + // Write buffer settings + opts.set_write_buffer_size(64 * 1024 * 1024); // 64MB + opts.set_max_write_buffer_number(3); + opts.set_min_write_buffer_number_to_merge(2); + + // Open the database + let db = DB::open(&opts, &path) + .map_err(|e| ControllerError::StorageError(format!("Failed to open RocksDB: {}", e)))?; + + info!("RocksDB opened successfully"); + + Ok(Self { + db: Arc::new(db), + path, + }) + } + + /// Get the database path + pub fn path(&self) -> &PathBuf { + &self.path + } +} + +#[async_trait] +impl StorageBackend for RocksDBBackend { + async fn put(&self, key: &str, value: &[u8]) -> Result<()> { + debug!("RocksDB put: key={}, size={}", key, value.len()); + + let db = self.db.clone(); + let key = key.to_string(); + let value = value.to_vec(); + + tokio::task::spawn_blocking(move || { + db.put(key.as_bytes(), value) + .map_err(|e| ControllerError::StorageError(format!("RocksDB put failed: {}", e))) + }) + .await + .map_err(|e| ControllerError::StorageError(format!("Task join error: {}", e)))??; + + Ok(()) + } + + async fn get(&self, key: &str) -> Result>> { + debug!("RocksDB get: key={}", key); + + let db = 
self.db.clone(); + let key = key.to_string(); + + tokio::task::spawn_blocking(move || { + db.get(key.as_bytes()) + .map_err(|e| ControllerError::StorageError(format!("RocksDB get failed: {}", e))) + }) + .await + .map_err(|e| ControllerError::StorageError(format!("Task join error: {}", e)))? + } + + async fn delete(&self, key: &str) -> Result<()> { + debug!("RocksDB delete: key={}", key); + + let db = self.db.clone(); + let key = key.to_string(); + + tokio::task::spawn_blocking(move || { + db.delete(key.as_bytes()) + .map_err(|e| ControllerError::StorageError(format!("RocksDB delete failed: {}", e))) + }) + .await + .map_err(|e| ControllerError::StorageError(format!("Task join error: {}", e)))??; + + Ok(()) + } + + async fn list_keys(&self, prefix: &str) -> Result> { + debug!("RocksDB list_keys: prefix={}", prefix); + + let db = self.db.clone(); + let prefix = prefix.to_string(); + + tokio::task::spawn_blocking(move || { + let mut keys = Vec::new(); + let iter = db.iterator(rocksdb::IteratorMode::Start); + + for item in iter { + match item { + Ok((key_bytes, _)) => { + if let Ok(key_str) = String::from_utf8(key_bytes.to_vec()) { + if key_str.starts_with(&prefix) { + keys.push(key_str); + } + } + } + Err(e) => { + return Err(ControllerError::StorageError(format!( + "RocksDB iteration failed: {}", + e + ))); + } + } + } + + Ok(keys) + }) + .await + .map_err(|e| ControllerError::StorageError(format!("Task join error: {}", e)))? 
+ } + + async fn batch_put(&self, items: Vec<(String, Vec)>) -> Result<()> { + debug!("RocksDB batch_put: {} items", items.len()); + + let db = self.db.clone(); + + tokio::task::spawn_blocking(move || { + let mut batch = WriteBatch::default(); + + for (key, value) in items { + batch.put(key.as_bytes(), value); + } + + db.write(batch).map_err(|e| { + ControllerError::StorageError(format!("RocksDB batch write failed: {}", e)) + }) + }) + .await + .map_err(|e| ControllerError::StorageError(format!("Task join error: {}", e)))??; + + Ok(()) + } + + async fn batch_delete(&self, keys: Vec) -> Result<()> { + debug!("RocksDB batch_delete: {} keys", keys.len()); + + let db = self.db.clone(); + + tokio::task::spawn_blocking(move || { + let mut batch = WriteBatch::default(); + + for key in keys { + batch.delete(key.as_bytes()); + } + + db.write(batch).map_err(|e| { + ControllerError::StorageError(format!("RocksDB batch delete failed: {}", e)) + }) + }) + .await + .map_err(|e| ControllerError::StorageError(format!("Task join error: {}", e)))??; + + Ok(()) + } + + async fn exists(&self, key: &str) -> Result { + debug!("RocksDB exists: key={}", key); + + let db = self.db.clone(); + let key = key.to_string(); + + tokio::task::spawn_blocking(move || { + db.get(key.as_bytes()) + .map(|opt| opt.is_some()) + .map_err(|e| { + ControllerError::StorageError(format!("RocksDB exists check failed: {}", e)) + }) + }) + .await + .map_err(|e| ControllerError::StorageError(format!("Task join error: {}", e)))? 
+ } + + async fn clear(&self) -> Result<()> { + info!("RocksDB clear: removing all data"); + + let db = self.db.clone(); + + tokio::task::spawn_blocking(move || { + let mut batch = WriteBatch::default(); + let iter = db.iterator(rocksdb::IteratorMode::Start); + + for item in iter { + match item { + Ok((key, _)) => { + batch.delete(&key); + } + Err(e) => { + return Err(ControllerError::StorageError(format!( + "RocksDB iteration failed: {}", + e + ))); + } + } + } + + db.write(batch) + .map_err(|e| ControllerError::StorageError(format!("RocksDB clear failed: {}", e))) + }) + .await + .map_err(|e| ControllerError::StorageError(format!("Task join error: {}", e)))??; + + Ok(()) + } + + async fn sync(&self) -> Result<()> { + debug!("RocksDB sync"); + + let db = self.db.clone(); + + tokio::task::spawn_blocking(move || { + db.flush() + .map_err(|e| ControllerError::StorageError(format!("RocksDB sync failed: {}", e))) + }) + .await + .map_err(|e| ControllerError::StorageError(format!("Task join error: {}", e)))??; + + Ok(()) + } + + async fn stats(&self) -> Result { + debug!("RocksDB stats"); + + let db = self.db.clone(); + + tokio::task::spawn_blocking(move || { + let mut key_count = 0; + let mut total_size = 0u64; + + let iter = db.iterator(rocksdb::IteratorMode::Start); + for item in iter { + match item { + Ok((key, value)) => { + key_count += 1; + total_size += (key.len() + value.len()) as u64; + } + Err(e) => { + return Err(ControllerError::StorageError(format!( + "RocksDB iteration failed: {}", + e + ))); + } + } + } + + // Get RocksDB property + let backend_info = db + .property_value("rocksdb.stats") + .unwrap_or(None) + .unwrap_or_else(|| "RocksDB".to_string()); + + Ok(StorageStats { + key_count, + total_size, + backend_info, + }) + }) + .await + .map_err(|e| ControllerError::StorageError(format!("Task join error: {}", e)))? 
+ } +} + +#[cfg(test)] +mod tests { + use tempfile::TempDir; + + use super::*; + + #[tokio::test] + async fn test_rocksdb_backend() { + let temp_dir = TempDir::new().unwrap(); + let db_path = temp_dir.path().join("test_db"); + + let backend = RocksDBBackend::new(db_path).await.unwrap(); + + // Test put and get + backend.put("test_key", b"test_value").await.unwrap(); + let value = backend.get("test_key").await.unwrap(); + assert_eq!(value, Some(b"test_value".to_vec())); + + // Test exists + assert!(backend.exists("test_key").await.unwrap()); + assert!(!backend.exists("nonexistent").await.unwrap()); + + // Test delete + backend.delete("test_key").await.unwrap(); + assert!(!backend.exists("test_key").await.unwrap()); + + // Test batch operations + let items = vec![ + ("batch_1".to_string(), b"value1".to_vec()), + ("batch_2".to_string(), b"value2".to_vec()), + ]; + backend.batch_put(items).await.unwrap(); + + assert!(backend.exists("batch_1").await.unwrap()); + assert!(backend.exists("batch_2").await.unwrap()); + + // Test list_keys + backend.put("prefix_1", b"value1").await.unwrap(); + backend.put("prefix_2", b"value2").await.unwrap(); + + let keys = backend.list_keys("prefix_").await.unwrap(); + assert_eq!(keys.len(), 2); + + // Test stats + let stats = backend.stats().await.unwrap(); + assert!(stats.key_count >= 4); + + // Test sync + backend.sync().await.unwrap(); + } +}