// NOTE(review): this file appears to be a whitespace-mangled paste — the raw
// fixtures handed to `test_source` / `add_test_formatted_source` /
// `add_test_s_exp` were almost certainly multi-line strings originally: the
// diagnostic ranges below point at fixture lines 5..=26, which cannot exist in
// a single-line source. The string contents (including the embedded newlines
// that survived the mangling) are preserved byte-for-byte here; restore the
// original fixture line breaks from version control before trusting these
// tests to pass.
use df_ls_diagnostics::lsp_types::*;
use df_ls_lexical_analysis::test_utils::LexerTestBuilder;

/// Feeds a deliberately broken creature-raw fixture through the lexer and
/// checks the formatter output plus the exact diagnostic codes and ranges.
#[test]
fn test_format_code() {
    LexerTestBuilder::test_source(
        // Fixture: every "Missing …" sentence describes the defect the lexer
        // should flag in the token that follows it.
        "creature_ocean_new [CREATURE:CRAB_MAN] Missing start bracket AMPHIBIOUS] Missing end bracket [COPY_TAGS_FROM:CRAB Missing first part of token [:A DF Raw Language Server developer with the head and pincers of a crab.] Missing token [] First part of token has to be of type 'token_name' [crab man:crab men:crab man] [100] ['c'] Should not be able to spread tokens across multiple lines (probably) THIS HAS NOT BEEN IMPLEMENTED YET, SO RIGHT NOW IT'S JUST SEEN AS A NORMAL TOKEN [ DESCRIPTION :This and above DESCRIPTION should be considered comments. ] ",
    )
    // Expected formatter output: stray `]` dropped, unterminated tokens
    // closed, invalid token bodies emptied.
    .add_test_formatted_source(
        "creature_ocean_new [CREATURE:CRAB_MAN] Missing start bracket AMPHIBIOUS Missing end bracket [COPY_TAGS_FROM:CRAB] Missing first part of token [:A DF Raw Language Server developer with the head and pincers of a crab.] Missing token [] First part of token has to be of type 'token_name' [:crab men:crab man] [] [] Should not be able to spread tokens across multiple lines (probably) THIS HAS NOT BEEN IMPLEMENTED YET, SO RIGHT NOW IT'S JUST SEEN AS A NORMAL TOKEN [] DESCRIPTION :This and above DESCRIPTION should be considered comments. 
",
    )
    // One code per expected diagnostic, in emission order; ranges below are
    // index-aligned with this list.
    .add_test_lexer_diagnostics_codes(vec![
        "unexpected_end_bracket",
        "missing_end_bracket",
        "missing_token_name",
        "missing_token_name",
        "missing_token_name",
        "unexpected_characters",
        "missing_token_name",
        "unexpected_characters",
        "missing_token_name",
        "unexpected_characters",
        "missing_token_name",
        "missing_end_bracket",
        "unexpected_end_bracket",
    ])
    .add_test_lexer_diagnostics_ranges(vec![
        Range { start: Position { line: 5, character: 18 }, end: Position { line: 5, character: 19 } },
        Range { start: Position { line: 8, character: 28 }, end: Position { line: 8, character: 28 } },
        Range { start: Position { line: 11, character: 9 }, end: Position { line: 11, character: 9 } },
        Range { start: Position { line: 14, character: 9 }, end: Position { line: 14, character: 9 } },
        Range { start: Position { line: 17, character: 9 }, end: Position { line: 17, character: 9 } },
        Range { start: Position { line: 17, character: 9 }, end: Position { line: 17, character: 17 } },
        Range { start: Position { line: 18, character: 9 }, end: Position { line: 18, character: 9 } },
        Range { start: Position { line: 18, character: 9 }, end: Position { line: 18, character: 12 } },
        Range { start: Position { line: 19, character: 9 }, end: Position { line: 19, character: 9 } },
        Range { start: Position { line: 19, character: 9 }, end: Position { line: 19, character: 12 } },
        Range { start: Position { line: 23, character: 9 }, end: Position { line: 23, character: 9 } },
        Range { start: Position { line: 23, character: 9 }, end: Position { line: 23, character: 9 } },
        Range { start: Position { line: 26, character: 8 }, end: Position { line: 26, character: 9 } },
    ])
    .run_test();
}

/// Same broken fixture, but additionally asserts the lexer's parse tree as an
/// s-expression (ERROR nodes mark the unrecoverable pieces).
#[test]
fn test_format_tree() {
    LexerTestBuilder::test_source(
        "creature_ocean_new [CREATURE:CRAB_MAN] Missing start bracket AMPHIBIOUS] Missing end bracket [COPY_TAGS_FROM:CRAB Missing first part of token [:A DF Raw Language Server developer with the head and pincers of a crab.] 
Missing token [] First part of token has to be of type 'token_name' [crab man:crab men:crab man] [100] ['c'] Should not be able to spread tokens across multiple lines (probably) THIS HAS NOT BEEN IMPLEMENTED YET, SO RIGHT NOW IT'S JUST SEEN AS A NORMAL TOKEN [ DESCRIPTION :This and above DESCRIPTION should be considered comments. ] ",
    )
    // Expected syntax tree: prose between tokens becomes `(comment)` nodes;
    // stray brackets and invalid token bodies surface as `(ERROR: …)` nodes.
    .add_test_s_exp(
        "(raw_file (header: `creature_ocean_new`) (comment) (token ([) (token_name: `CREATURE`) (token_arguments (:) (token_argument_reference: `CRAB_MAN`) ) (]) ) (comment) (ERROR: `]`) (comment) (token ([) (token_name: `COPY_TAGS_FROM`) (token_arguments (:) (token_argument_reference: `CRAB`) ) (]) ) (comment) (token ([) (token_name: ``) (token_arguments (:) (token_argument_string: `A DF Raw Language Server developer with the head and pincers of a crab.`) ) (]) ) (comment) (token ([) (token_name: ``) (]) ) (comment) (token ([) (token_name: ``) (ERROR: `crab man`) (token_arguments (:) (token_argument_string: `crab men`) (:) (token_argument_string: `crab man`) ) (]) ) (comment) (token ([) (token_name: ``) (ERROR: `100`) (]) ) (comment) (token ([) (token_name: ``) (ERROR: `'c'`) (]) ) (comment) (token ([) (token_name: ``) (]) ) (comment) (ERROR: `]`) (comment) (EOF: ``) ) ",
    )
    // Identical diagnostics to `test_format_code`: same fixture, same lexer.
    .add_test_lexer_diagnostics_codes(vec![
        "unexpected_end_bracket",
        "missing_end_bracket",
        "missing_token_name",
        "missing_token_name",
        "missing_token_name",
        "unexpected_characters",
        "missing_token_name",
        "unexpected_characters",
        "missing_token_name",
        "unexpected_characters",
        "missing_token_name",
        "missing_end_bracket",
        "unexpected_end_bracket",
    ])
    .add_test_lexer_diagnostics_ranges(vec![
        Range { start: Position { line: 5, character: 18 }, end: Position { line: 5, character: 19 } },
        Range { start: Position { line: 8, character: 28 }, end: Position { line: 8, character: 28 } },
        Range { start: Position { line: 11, character: 9 }, end: Position { line: 11, character: 9 } },
        Range { start: Position { line: 14, character: 9 }, end: Position { line: 14, character: 9 } },
        Range { start: Position { line: 17, character: 9 }, end: Position { line: 17, character: 9 } },
        Range { start: Position { line: 17, character: 9 }, end: Position { line: 17, character: 17 } },
        Range { start: Position { line: 18, character: 9 }, end: Position { line: 18, character: 9 } },
        Range { start: Position { line: 18, character: 9 }, end: Position { line: 18, character: 12 } },
        Range { start: Position { line: 19, character: 9 }, end: Position { line: 19, character: 9 } },
        Range { start: Position { line: 19, character: 9 }, end: Position { line: 19, character: 12 } },
        Range { start: Position { line: 23, character: 9 }, end: Position { line: 23, character: 9 } },
        Range { start: Position { line: 23, character: 9 }, end: Position { line: 23, character: 9 } },
        Range { start: Position { line: 26, character: 8 }, end: Position { line: 26, character: 9 } },
    ])
    .run_test();
}