use df_ls_diagnostics::lsp_types::*;
use df_ls_lexical_analysis::test_utils::LexerTestBuilder;

/// Exercises the lexer's token-name rules across case / underscore / digit
/// permutations: valid names are uppercase ASCII (optionally with `_` and
/// digits, not digit-leading); lowercase runs and leading digits/underscores
/// are flagged `unexpected_characters`, and a name with no valid characters
/// at all additionally yields `missing_token_name` (see `[bab]`).
///
/// NOTE(review): the raw source below is line-sensitive. The diagnostic
/// ranges use zero-based line/character positions, so line 0 is the header
/// and every `[...]` token must stay on its own line with an 8-column
/// indent (e.g. line 1 chars 9..10 is the `a` of `[aA]`). Do not re-indent
/// the string literal.
#[test]
fn test_token_name_permutations() {
    LexerTestBuilder::test_source(
        "descriptor_color_standard
        [aA]
        [Aa]
        [aAa]
        [AaA]
        [bab]
        [AAA]
        [BAB]
        [A_A]
        [_Aa]
        [_aA]
        [A_a]
        [Aa_]
        [a_A]
        [aA_]
        [A9]
        [9A]
        [A_9]
        [9_A]
        [9___A]
        [9___9__A]
        [REF:a]
        [NAME:REF]
        ",
    )
    // Expected parse tree. One `(token …)` per source line, in order;
    // `(comment)` nodes are the whitespace/newlines between tokens.
    // NOTE(review): assumes `add_test_s_exp` compares s-expressions
    // whitespace-insensitively — confirm against test_utils before
    // changing this formatting.
    .add_test_s_exp(
        "(raw_file
            (header: `descriptor_color_standard`)
            (comment)
            (token ([) (token_name: `A`) (]) )
            (comment)
            (token ([) (token_name: `A`) (ERROR: `a`) (]) )
            (comment)
            (token ([) (token_name: `A`) (ERROR: `a`) (]) )
            (comment)
            (token ([) (token_name: `A`) (ERROR: `aA`) (]) )
            (comment)
            (token ([) (token_name: ``) (ERROR: `bab`) (]) )
            (comment)
            (token ([) (token_name: `AAA`) (]) )
            (comment)
            (token ([) (token_name: `BAB`) (]) )
            (comment)
            (token ([) (token_name: `A_A`) (]) )
            (comment)
            (token ([) (token_name: `A`) (ERROR: `a`) (]) )
            (comment)
            (token ([) (token_name: `A`) (]) )
            (comment)
            (token ([) (token_name: `A_`) (ERROR: `a`) (]) )
            (comment)
            (token ([) (token_name: `A`) (ERROR: `a_`) (]) )
            (comment)
            (token ([) (token_name: `A`) (]) )
            (comment)
            (token ([) (token_name: `A_`) (]) )
            (comment)
            (token ([) (token_name: `A9`) (]) )
            (comment)
            (token ([) (token_name: `A`) (]) )
            (comment)
            (token ([) (token_name: `A_9`) (]) )
            (comment)
            (token ([) (token_name: `A`) (]) )
            (comment)
            (token ([) (token_name: `A`) (]) )
            (comment)
            (token ([) (token_name: `A`) (]) )
            (comment)
            (token ([) (token_name: `REF`) (token_arguments (:) (token_argument_string: `a`) ) (]) )
            (comment)
            (token ([) (token_name: `NAME`) (token_arguments (:) (token_argument_reference: `REF`) ) (]) )
            (comment)
            (EOF: ``)
        )
        ",
    )
    // 18 diagnostics, paired index-for-index with the ranges below.
    .add_test_lexer_diagnostics_codes(vec![
        "unexpected_characters", // [aA]   line 1: `a`
        "unexpected_characters", // [Aa]   line 2: `a`
        "unexpected_characters", // [aAa]  line 3: first `a`
        "unexpected_characters", // [aAa]  line 3: second `a`
        "unexpected_characters", // [AaA]  line 4: `aA`
        "missing_token_name",    // [bab]  line 5: no valid name characters
        "unexpected_characters", // [bab]  line 5: `bab`
        "unexpected_characters", // [_Aa]  line 9: `_`
        "unexpected_characters", // [_Aa]  line 9: `a`
        "unexpected_characters", // [_aA]  line 10: `_a`
        "unexpected_characters", // [A_a]  line 11: `a`
        "unexpected_characters", // [Aa_]  line 12: `a_`
        "unexpected_characters", // [a_A]  line 13: `a_`
        "unexpected_characters", // [aA_]  line 14: `a`
        "unexpected_characters", // [9A]   line 16: `9`
        "unexpected_characters", // [9_A]  line 18: `9_`
        "unexpected_characters", // [9___A]    line 19: `9___`
        "unexpected_characters", // [9___9__A] line 20: `9___9__`
    ])
    // Zero-based positions into the raw source above; the 8-space indent
    // inside the string literal is why every token's `[` sits at char 8.
    .add_test_lexer_diagnostics_ranges(vec![
        Range {
            start: Position { line: 1, character: 9 },
            end: Position { line: 1, character: 10 },
        },
        Range {
            start: Position { line: 2, character: 10 },
            end: Position { line: 2, character: 11 },
        },
        Range {
            start: Position { line: 3, character: 9 },
            end: Position { line: 3, character: 10 },
        },
        Range {
            start: Position { line: 3, character: 11 },
            end: Position { line: 3, character: 12 },
        },
        Range {
            start: Position { line: 4, character: 10 },
            end: Position { line: 4, character: 12 },
        },
        // Zero-width range: `missing_token_name` points at where the
        // name should start.
        Range {
            start: Position { line: 5, character: 9 },
            end: Position { line: 5, character: 9 },
        },
        Range {
            start: Position { line: 5, character: 9 },
            end: Position { line: 5, character: 12 },
        },
        Range {
            start: Position { line: 9, character: 9 },
            end: Position { line: 9, character: 10 },
        },
        Range {
            start: Position { line: 9, character: 11 },
            end: Position { line: 9, character: 12 },
        },
        Range {
            start: Position { line: 10, character: 9 },
            end: Position { line: 10, character: 11 },
        },
        Range {
            start: Position { line: 11, character: 11 },
            end: Position { line: 11, character: 12 },
        },
        Range {
            start: Position { line: 12, character: 10 },
            end: Position { line: 12, character: 12 },
        },
        Range {
            start: Position { line: 13, character: 9 },
            end: Position { line: 13, character: 11 },
        },
        Range {
            start: Position { line: 14, character: 9 },
            end: Position { line: 14, character: 10 },
        },
        Range {
            start: Position { line: 16, character: 9 },
            end: Position { line: 16, character: 10 },
        },
        Range {
            start: Position { line: 18, character: 9 },
            end: Position { line: 18, character: 11 },
        },
        Range {
            start: Position { line: 19, character: 9 },
            end: Position { line: 19, character: 13 },
        },
        Range {
            start: Position { line: 20, character: 9 },
            end: Position { line: 20, character: 16 },
        },
    ])
    .run_test();
}