syntax = "proto3";

package cluster;

option go_package = "github.com/CeresDB/ceresdbproto/golang/pkg/clusterpb";

enum NodeState {
  ONLINE = 0;
  OFFLINE = 1;
}

enum ShardRole {
  LEADER = 0;
  FOLLOWER = 1;
}

message PartitionInfo {
  oneof info {
    HashPartitionInfo hash = 1;
    KeyPartitionInfo key = 2;
    RandomPartitionInfo random = 3;
  }
}

message PartitionDefinition {
  string name = 1;
  oneof origin_name {
    string origin = 2;
  }
}

message HashPartitionInfo {
  int32 version = 1;
  repeated PartitionDefinition definitions = 2;
  bytes expr = 3;
  bool linear = 4;
}

message KeyPartitionInfo {
  int32 version = 1;
  repeated PartitionDefinition definitions = 2;
  repeated string partition_key = 3;
  bool linear = 4;
}

message RandomPartitionInfo {
  repeated PartitionDefinition definitions = 1;
}

// Example key layout:
// cluster 1: v1/cluster/1 -> ceresmeta.Cluster
// cluster 2: v1/cluster/2 -> ceresmeta.Cluster
message Cluster {
  enum TopologyType {
    UNKNOWN = 0;
    STATIC = 1;
    DYNAMIC = 2;
  }

  uint32 id = 1;
  string name = 2;
  uint32 min_node_count = 3;
  // The number of replicas of each shard.
  uint32 replication_factor = 4;
  uint32 shard_total = 5;
  uint64 created_at = 6;
  // Deprecated: unused since CeresDB v1.2.0.
  uint32 partition_table_ratio_of_nodes = 7;
  uint64 modified_at = 8;
  bool enable_schedule = 9;
  TopologyType topology_type = 10;
  // The maximum number of procedures executing concurrently within a single
  // `BatchProcedure`; currently only used by `TransferLeaderBatchProcedure`.
  uint32 procedure_executing_batch_size = 11;
}

// Example key layout:
// cluster 1: v1/cluster/1/view/latest_version -> 99
//            v1/cluster/1/view/99 -> ceresmeta.ClusterView
//            v1/cluster/1/view/98 -> ceresmeta.ClusterView
message ClusterView {
  enum ClusterState {
    EMPTY = 0;
    PREPARE_REBALANCE = 1;
    AWAITING_CLOSE = 2;
    AWAITING_OPEN = 3;
    STABLE = 4;
  }

  uint32 cluster_id = 1;
  uint64 version = 2;
  ClusterState state = 3;
  repeated ShardNode shard_nodes = 4;
  // The cause of the latest change to the cluster view.
  string cause = 5;
  uint64 created_at = 6;
}

message ShardNode {
  uint32 id = 1;
  ShardRole shard_role = 2;
  string node = 3;
}

// Example key layout:
// cluster 1: v1/cluster/1/schema/1 -> ceresmeta.Schema
//            v1/cluster/1/schema/2 -> ceresmeta.Schema
//            v1/cluster/1/schema/3 -> ceresmeta.Schema
message Schema {
  uint32 id = 1;
  uint32 cluster_id = 2;
  string name = 3;
  uint64 created_at = 4;
}

// Example key layout:
// cluster 1: v1/cluster/1/schema/1/table/1 -> ceresmeta.Table
//            v1/cluster/1/schema/1/table/2 -> ceresmeta.Table
//            v1/cluster/1/schema/1/table/3 -> ceresmeta.Table
//            v1/cluster/1/schema/2/table/4 -> ceresmeta.Table
// cluster 1, table name -> ID:
//            v1/cluster/1/schema/1/table_name_to_id/table1 -> 1
//            v1/cluster/1/schema/1/table_name_to_id/table2 -> 2
//            v1/cluster/1/schema/1/table_name_to_id/table3 -> 3
//            v1/cluster/1/schema/2/table_name_to_id/table4 -> 4
message Table {
  uint64 id = 1;
  string name = 2;
  uint32 schema_id = 3;
  string desc = 4;
  uint64 created_at = 5;
  PartitionInfo partition_info = 6;
}

// Example key layout:
// cluster 1: v1/cluster/1/shard_view/1/latest_version -> 9
//            v1/cluster/1/shard_view/1/9 -> ceresmeta.ShardView
//            v1/cluster/1/shard_view/1/8 -> ceresmeta.ShardView
//            v1/cluster/1/shard_view/2/latest_version -> 3
//            v1/cluster/1/shard_view/2/3 -> ceresmeta.ShardView
message ShardView {
  uint32 shard_id = 1;
  uint64 version = 2;
  repeated uint64 table_ids = 3;
  uint64 created_at = 4;
}

// Example key layout:
// cluster 1: v1/cluster/1/node/127.0.0.1:8081 -> ceresmeta.Node
//            v1/cluster/1/node/127.0.0.2:8081 -> ceresmeta.Node
message Node {
  string name = 1;
  NodeStats stats = 2;
  uint64 last_touch_time = 3;
  NodeState state = 4;
}

message NodeStats {
  uint32 lease = 1;
  string zone = 2;
  string node_version = 3;
}
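
// A minimal usage sketch (not part of the schema): assuming the Go package
// generated from this file (see `go_package` above) and an etcd v3 client,
// a ceresmeta-style writer could persist a Cluster under the key layout
// documented in the comments. The etcd client and the `storeCluster` helper
// below are illustrative assumptions, not part of ceresdbproto:
//
//   import (
//       "context"
//       "fmt"
//
//       clientv3 "go.etcd.io/etcd/client/v3"
//       "google.golang.org/protobuf/proto"
//
//       "github.com/CeresDB/ceresdbproto/golang/pkg/clusterpb"
//   )
//
//   // storeCluster marshals the message to protobuf wire format and writes
//   // it under v1/cluster/<id>, mirroring the example keys above.
//   func storeCluster(ctx context.Context, cli *clientv3.Client, c *clusterpb.Cluster) error {
//       data, err := proto.Marshal(c)
//       if err != nil {
//           return err
//       }
//       _, err = cli.Put(ctx, fmt.Sprintf("v1/cluster/%d", c.Id), string(data))
//       return err
//   }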