use df_ls_diagnostics::lsp_types::*;
use df_ls_lexical_analysis::test_utils::LexerTestBuilder;

/// Checks casing permutations of a token body:
/// - `[a]` / `[a:a]`: a lowercase letter is not a valid token name, so the
///   lexer reports a zero-width `missing_token_name` plus
///   `unexpected_characters` covering the `a`.
/// - `[A]`, `[A:a]`, `[A:A]`: valid; a lowercase argument lexes as a
///   `token_argument_string`, an uppercase one as a `token_argument_reference`.
/// - `[a:A]`: the lexer skips `a:` as unexpected characters and recovers,
///   taking `A` as the token name.
#[test]
fn test_token_body_permutations() {
    LexerTestBuilder::test_source(
        "Token Body
        [a]
        [A]
        [a:a]
        [A:a]
        [A:A]
        [a:A]
        ",
    )
    .add_test_s_exp(
        "(raw_file
        (header: `Token Body`)
        (comment)
        (token
            ([)
            (token_name: ``)
            (ERROR: `a`)
            (])
        )
        (comment)
        (token
            ([)
            (token_name: `A`)
            (])
        )
        (comment)
        (token
            ([)
            (token_name: ``)
            (ERROR: `a`)
            (token_arguments
                (:)
                (token_argument_string: `a`)
            )
            (])
        )
        (comment)
        (token
            ([)
            (token_name: `A`)
            (token_arguments
                (:)
                (token_argument_string: `a`)
            )
            (])
        )
        (comment)
        (token
            ([)
            (token_name: `A`)
            (token_arguments
                (:)
                (token_argument_reference: `A`)
            )
            (])
        )
        (comment)
        (token
            ([)
            (token_name: `A`)
            (])
        )
        (comment)
        (EOF: ``)
        )
        ",
    )
    .add_test_lexer_diagnostics_codes(vec![
        "missing_token_name",
        "unexpected_characters",
        "missing_token_name",
        "unexpected_characters",
        "unexpected_characters",
    ])
    .add_test_lexer_diagnostics_ranges(vec![
        // `[a]` (line 1): zero-width `missing_token_name` before the `a`,
        // then `unexpected_characters` covering the `a` itself.
        Range {
            start: Position {
                line: 1,
                character: 9,
            },
            end: Position {
                line: 1,
                character: 9,
            },
        },
        Range {
            start: Position {
                line: 1,
                character: 9,
            },
            end: Position {
                line: 1,
                character: 10,
            },
        },
        // `[a:a]` (line 3): same pair of diagnostics as `[a]`.
        Range {
            start: Position {
                line: 3,
                character: 9,
            },
            end: Position {
                line: 3,
                character: 9,
            },
        },
        Range {
            start: Position {
                line: 3,
                character: 9,
            },
            end: Position {
                line: 3,
                character: 10,
            },
        },
        // `[a:A]` (line 6): `unexpected_characters` covering `a:`.
        Range {
            start: Position {
                line: 6,
                character: 9,
            },
            end: Position {
                line: 6,
                character: 11,
            },
        },
    ])
    .run_test();
}