[package]
name = "jieba-rs-siro"
version = "0.6.7"
authors = ["messense", "Paul Meng"]
categories = ["text-processing"]
description = "The Jieba Chinese Word Segmentation Implemented in Rust"
keywords = ["nlp", "chinese", "segmentation"]
license = "MIT"
readme = "README.md"
repository = "https://github.com/sirodeneko/jieba-rs"
edition = "2018"

[package.metadata.docs.rs]
all-features = true

[dev-dependencies]
criterion = "0.3"
rand = "0.8"
wasm-bindgen-test = "0.3.0"

[target.'cfg(unix)'.dev-dependencies]
jemallocator = "0.3.2"

[[bench]]
name = "jieba_benchmark"
harness = false
required-features = ["tfidf", "textrank"]

[dependencies]
regex = "1.0"
lazy_static = "1.0"
phf = "0.10"
hashbrown = { version = "0.12", default-features = false, features = ["inline-more"] }
cedarwood = "0.4"
ordered-float = { version = "2.0", optional = true }
fxhash = "0.2.1"

[build-dependencies]
phf_codegen = "0.10"

[features]
default = ["default-dict"]
default-dict = []
tfidf = ["ordered-float"]
textrank = ["ordered-float"]

[workspace]
members = [
    ".",
    "capi",
    "examples/weicheng",
]
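
# Sketch of downstream usage (an assumption, not part of this manifest): a
# consuming crate would depend on this fork and opt into TF-IDF keyword
# extraction via the `tfidf` feature, which pulls in the optional
# `ordered-float` dependency declared above; `textrank` works the same way.
#
#     [dependencies]
#     jieba-rs-siro = { version = "0.6.7", features = ["tfidf"] }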