use df_ls_diagnostics::lsp_types::*;
use df_ls_lexical_analysis::test_utils::LexerTestBuilder;

#[test]
fn test_token_argument_character_cp437_permutations() {
    let source = "Token Argument Character
        [A:'a']
        [A:'c']
        [A:'z']
        [A:'!']
        [A:''']
        [A:'[']
        [A:'%']
        [A:'#']
        [A:''']
        [A:'₧'] = 'Γéº' (because of CP437) so is string
        [A:'║'] = 'Γòæ' (because of CP437) so is string
        [A:'±'] = '┬▒' (because of CP437) so is string
        [A:'±':] = '┬▒' (because of CP437) so is string
        [A:'±':'c'] = '┬▒' (because of CP437) so is string
        The following 2 are not allowed
        [A:':'] 2 arguments, each a string with `'`.
        [A:']'] Closes the token with a string of `'`.
";
    // Convert source from CP437 to UTF-8
    let source_bytes = df_cp437::convert_cp437_to_utf8(source.as_bytes());
    let source = String::from_utf8(source_bytes).expect("Non UTF-8 characters found");
    LexerTestBuilder::test_source(source)
        .add_test_s_exp(
            "(raw_file
                (header: `Token Argument Character`)
                (comment)
                (token
                    ([)
                    (token_name: `A`)
                    (token_arguments
                        (:)
                        (token_argument_character: `'a'`)
                    )
                    (])
                )
                (comment)
                (token
                    ([)
                    (token_name: `A`)
                    (token_arguments
                        (:)
                        (token_argument_character: `'c'`)
                    )
                    (])
                )
                (comment)
                (token
                    ([)
                    (token_name: `A`)
                    (token_arguments
                        (:)
                        (token_argument_character: `'z'`)
                    )
                    (])
                )
                (comment)
                (token
                    ([)
                    (token_name: `A`)
                    (token_arguments
                        (:)
                        (token_argument_character: `'!'`)
                    )
                    (])
                )
                (comment)
                (token
                    ([)
                    (token_name: `A`)
                    (token_arguments
                        (:)
                        (token_argument_character: `'''`)
                    )
                    (])
                )
                (comment)
                (token
                    ([)
                    (token_name: `A`)
                    (token_arguments
                        (:)
                        (token_argument_character: `'['`)
                    )
                    (])
                )
                (comment)
                (token
                    ([)
                    (token_name: `A`)
                    (token_arguments
                        (:)
                        (token_argument_character: `'%'`)
                    )
                    (])
                )
                (comment)
                (token
                    ([)
                    (token_name: `A`)
                    (token_arguments
                        (:)
                        (token_argument_character: `'#'`)
                    )
                    (])
                )
                (comment)
                (token
                    ([)
                    (token_name: `A`)
                    (token_arguments
                        (:)
                        (token_argument_character: `'''`)
                    )
                    (])
                )
                (comment)
                (token
                    ([)
                    (token_name: `A`)
                    (token_arguments
                        (:)
                        (token_argument_string: `'Γéº'`)
                    )
                    (])
                )
                (comment)
                (token
                    ([)
                    (token_name: `A`)
                    (token_arguments
                        (:)
                        (token_argument_string: `'Γòæ'`)
                    )
                    (])
                )
                (comment)
                (token
                    ([)
                    (token_name: `A`)
                    (token_arguments
                        (:)
                        (token_argument_string: `'┬▒'`)
                    )
                    (])
                )
                (comment)
                (token
                    ([)
                    (token_name: `A`)
                    (token_arguments
                        (:)
                        (token_argument_string: `'┬▒'`)
                        (:)
                        (token_argument_empty: ``)
                    )
                    (])
                )
                (comment)
                (token
                    ([)
                    (token_name: `A`)
                    (token_arguments
                        (:)
                        (token_argument_string: `'┬▒'`)
                        (:)
                        (token_argument_character: `'c'`)
                    )
                    (])
                )
                (comment)
                (token
                    ([)
                    (token_name: `A`)
                    (token_arguments
                        (:)
                        (token_argument_string: `'`)
                        (:)
                        (token_argument_string: `'`)
                    )
                    (])
                )
                (comment)
                (token
                    ([)
                    (token_name: `A`)
                    (token_arguments
                        (:)
                        (token_argument_string: `'`)
                    )
                    (])
                )
                (comment)
                (ERROR: `]`)
                (comment)
                (EOF: ``)
            )
            ",
        )
        .add_test_lexer_diagnostics_codes(vec!["unexpected_end_bracket"])
        .add_test_lexer_diagnostics_ranges(vec![Range {
            start: Position {
                line: 17,
                character: 14,
            },
            end: Position {
                line: 17,
                character: 15,
            },
        }])
        .run_test();
}

#[test]
fn test_token_argument_character_utf8_permutations() {
    LexerTestBuilder::test_source(
        "Token Argument Character
        [A:'a']
        [A:'c']
        [A:'z']
        [A:'!']
        [A:''']
        [A:'[']
        [A:'%']
        [A:'#']
        [A:''']
        [A:'₧'] one character in utf8 so is character
        [A:'║'] one character in utf8 so is character
        [A:'±'] one character in utf8 so is character
        [A:'±':] one character in utf8 so is character
        [A:'±':'c'] one character in utf8 so is character
        The following 2 are not allowed
        [A:':'] 2 arguments, each a string with `'`.
        [A:']'] Closes the token with a string of `'`.
", ) .add_test_s_exp( "(raw_file (header: `Token Argument Character`) (comment) (token ([) (token_name: `A`) (token_arguments (:) (token_argument_character: `'a'`) ) (]) ) (comment) (token ([) (token_name: `A`) (token_arguments (:) (token_argument_character: `'c'`) ) (]) ) (comment) (token ([) (token_name: `A`) (token_arguments (:) (token_argument_character: `'z'`) ) (]) ) (comment) (token ([) (token_name: `A`) (token_arguments (:) (token_argument_character: `'!'`) ) (]) ) (comment) (token ([) (token_name: `A`) (token_arguments (:) (token_argument_character: `'''`) ) (]) ) (comment) (token ([) (token_name: `A`) (token_arguments (:) (token_argument_character: `'['`) ) (]) ) (comment) (token ([) (token_name: `A`) (token_arguments (:) (token_argument_character: `'%'`) ) (]) ) (comment) (token ([) (token_name: `A`) (token_arguments (:) (token_argument_character: `'#'`) ) (]) ) (comment) (token ([) (token_name: `A`) (token_arguments (:) (token_argument_character: `'''`) ) (]) ) (comment) (token ([) (token_name: `A`) (token_arguments (:) (token_argument_character: `'₧'`) ) (]) ) (comment) (token ([) (token_name: `A`) (token_arguments (:) (token_argument_character: `'║'`) ) (]) ) (comment) (token ([) (token_name: `A`) (token_arguments (:) (token_argument_character: `'±'`) ) (]) ) (comment) (token ([) (token_name: `A`) (token_arguments (:) (token_argument_character: `'±'`) (:) (token_argument_empty: ``) ) (]) ) (comment) (token ([) (token_name: `A`) (token_arguments (:) (token_argument_character: `'±'`) (:) (token_argument_character: `'c'`) ) (]) ) (comment) (token ([) (token_name: `A`) (token_arguments (:) (token_argument_string: `'`) (:) (token_argument_string: `'`) ) (]) ) (comment) (token ([) (token_name: `A`) (token_arguments (:) (token_argument_string: `'`) ) (]) ) (comment) (ERROR: `]`) (comment) (EOF: ``) ) ", ) .add_test_lexer_diagnostics_codes(vec!["unexpected_end_bracket"]) .add_test_lexer_diagnostics_ranges(vec![Range { start: Position { line: 17, character: 14, }, end: Position { line: 17, character: 15, }, }]) .run_test(); }