# -*- coding: utf-8 -*-
'''
transliteration.py

Automatically builds rules for transforming other scripts (e.g. Cyrillic,
Greek, Han, Katakana, Devanagari, etc.) into Latin characters.

Uses XML transforms from the CLDR repository.
'''

import argparse
import codecs
import csv
import htmlentitydefs
import itertools
import os
import re
import requests
import six
import sys
import time
import urlparse
import unicodedata

from collections import defaultdict, deque
from lxml import etree

from scanner import Scanner
from unicode_data import *
from unicode_properties import *

from unicode_paths import CLDR_DIR
from geodata.encoding import safe_decode, safe_encode
from geodata.string_utils import NUM_CODEPOINTS, wide_unichr, wide_ord

CLDR_TRANSFORMS_DIR = os.path.join(CLDR_DIR, 'common', 'transforms')

PRE_TRANSFORM = 1
FORWARD_TRANSFORM = 2
BACKWARD_TRANSFORM = 3
BIDIRECTIONAL_TRANSFORM = 4

PRE_TRANSFORM_OP = '::'
BACKWARD_TRANSFORM_OPS = set([u'←', u'<'])
FORWARD_TRANSFORM_OPS = set([u'→', u'>'])
BIDIRECTIONAL_TRANSFORM_OPS = set([u'↔', u'<>'])

ASSIGNMENT_OP = '='

PRE_CONTEXT_INDICATOR = '{'
POST_CONTEXT_INDICATOR = '}'

REVISIT_INDICATOR = '|'

WORD_BOUNDARY_VAR_NAME = 'wordBoundary'
WORD_BOUNDARY_VAR = '${}'.format(WORD_BOUNDARY_VAR_NAME)

START_OF_HAN_VAR_NAME = 'startOfHanMarker'
START_OF_HAN_VAR = '${}'.format(START_OF_HAN_VAR_NAME)

start_of_han_regex = re.compile(START_OF_HAN_VAR.replace('$', '\$'))
word_boundary_var_regex = re.compile(WORD_BOUNDARY_VAR.replace('$', '\$'))

WORD_BOUNDARY_CHAR = u'\u0001'
EMPTY_TRANSITION = u'\u0004'

NAMESPACE_SEPARATOR_CHAR = u"|"

WORD_BOUNDARY_CHAR = u"\x01"
PRE_CONTEXT_CHAR = u"\x86"
POST_CONTEXT_CHAR = u"\x87"
EMPTY_TRANSITION_CHAR = u"\x04"
REPEAT_CHAR = u"\x05"
GROUP_INDICATOR_CHAR = u"\x1d"
BEGIN_SET_CHAR = u"\x0f"
END_SET_CHAR = u"\x0e"

BIDIRECTIONAL_TRANSLITERATORS = {
    'fullwidth-halfwidth': 'halfwidth-fullwidth'
}

REVERSE_TRANSLITERATORS = {
    'latin-katakana': 'katakana-latin',
    'latin-conjoiningjamo': 'conjoiningjamo-latin',
}

EXCLUDE_TRANSLITERATORS = set([
    # Don't care about spaced Han because our tokenizer does it already
    'han-spacedhan',
    # Doesn't appear to be used in ICU
    'korean-latin-bgn',
])

TRANSLITERATOR_ALIASES = {
    'greek_latin_ungegn': 'greek-latin-ungegn'
}

NFD = 'NFD'
NFKD = 'NFKD'
NFC = 'NFC'
NFKC = 'NFKC'
STRIP_MARK = 'STRIP_MARK'

LOWER = 'lower'
UPPER = 'upper'
TITLE = 'title'

UNICODE_NORMALIZATION_TRANSFORMS = set([
    NFD,
    NFKD,
    NFC,
    NFKC,
    STRIP_MARK,
])


class TransliterationParseError(Exception):
    pass


RULE = 'RULE'
TRANSFORM = 'TRANSFORM'
FILTER = 'FILTER'

UTF8PROC_TRANSFORMS = {
    'Any-NFC': NFC,
    'Any-NFD': NFD,
    'Any-NFKD': NFKD,
    'Any-NFKC': NFKC,
    'Any-Lower': LOWER,
    'Any-Upper': UPPER,
    'Any-Title': TITLE,
}

CONTEXT_TYPE_NONE = 'CONTEXT_TYPE_NONE'
CONTEXT_TYPE_STRING = 'CONTEXT_TYPE_STRING'
CONTEXT_TYPE_WORD_BOUNDARY = 'CONTEXT_TYPE_WORD_BOUNDARY'
CONTEXT_TYPE_REGEX = 'CONTEXT_TYPE_REGEX'

all_transforms = set()
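# The regexes below pick apart individual rule lines from the CLDR transform
# files. Illustrative examples of the rule syntax they are meant to match
# (not quoted from any particular transform file):
#
#   :: NFD ;                    pre-transform (filter/normalization), PRE_TRANSFORM_OP
#   $vowel = [aeiou] ;          variable assignment, ASSIGNMENT_OP
#   х → kh ;                    forward transform
#   ж ↔ zh ;                    bidirectional transform
#   х { е → ye ;                forward transform with pre-context ('{')
#   σ } $wordBoundary → s ;     forward transform with post-context ('}')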
pre_transform_full_regex = re.compile('::[\s]*(.*)[\s]*', re.UNICODE)
pre_transform_regex = re.compile('[\s]*([^\s\(\)]*)[\s]*(?:\((.*)\)[\s]*)?', re.UNICODE)
assignment_regex = re.compile(u"(?:[\s]*(\$[^\s\=]+)[\s]*\=[\s]*(?!=[\s])(.*)(?)|[←<→>↔])(?:[\s]*(?!=[\s])(.*)(?

start_ord:
    # Ranges are inclusive
    chars.extend([wide_unichr(c) for c in range(start_ord, end_ord + 1)])

    return chars


chars = get_chars_by_script()
all_scripts = build_master_scripts_list(chars)
script_codes = {k.lower(): v.lower() for k, v in six.iteritems(get_script_codes(all_scripts))}


def parse_regex_char_class(c, current_filter=all_chars):
    chars = []
    orig = c
    if c.startswith('\\p'):
        c = c.split('{')[-1].split('}')[0]

    c = c.strip(': ')

    is_negation = False
    if c.startswith('^'):
        is_negation = True
        c = c.strip('^')

    if '=' in c:
        prop, value = c.split('=')
        prop = unicode_property_aliases.get(prop.lower(), prop)
        value = unicode_property_value_aliases.get(prop.lower(), {}).get(value, value)

        if prop == COMBINING_CLASS_PROP:
            chars = unicode_combining_classes[value]
        elif prop == GENERAL_CATEGORY_PROP:
            chars = unicode_categories.get(value, unicode_general_categories[value])
        elif prop == BLOCK_PROP:
            chars = unicode_blocks[value.lower()]
        elif prop == SCRIPT_PROP:
            if value.lower() in unicode_scripts:
                chars = unicode_scripts[value.lower()]
            elif value.lower() in script_codes:
                chars = unicode_scripts[script_codes[value.lower()]]
        elif prop == WORD_BREAK_PROP:
            chars = unicode_word_breaks[value]
        else:
            raise TransliterationParseError(c)
    else:
        c = c.replace('-', '_').replace(' ', '_')
        if c.lower() in unicode_property_aliases:
            c = unicode_property_aliases[c.lower()]
        elif c.lower() in unicode_category_aliases:
            c = unicode_category_aliases[c.lower()]

        if c in unicode_general_categories:
            chars = unicode_general_categories[c]
        elif c in unicode_categories:
            chars = unicode_categories[c]
        elif c.lower() in unicode_properties:
            chars = unicode_properties[c.lower()]
        elif c.lower() in unicode_scripts:
            chars = unicode_scripts[c.lower()]
        elif c.lower() in script_codes:
            chars = unicode_scripts[script_codes[c.lower()]]
        elif c.lower() in unicode_properties:
            chars = unicode_properties[c.lower()]
        else:
            raise TransliterationParseError(c)

    if is_negation:
        chars = current_filter - set(chars)

    return sorted((set(chars) & current_filter) - control_chars)


def parse_balanced_sets(s):
    open_brackets = 0
    max_nesting = 0

    skip = False

    for i, ch in enumerate(s):
        if ch == '[':
            if open_brackets == 0:
                start = i
                max_nesting
            open_brackets += 1
        elif ch == ']':
            open_brackets -= 1
            if open_brackets == 0:
                skip = False
                yield (s[start:i + 1], CHAR_MULTI_SET)
                (start, i + 1)
        elif open_brackets == 0 and not skip:
            for token, token_class in char_set_scanner.scan(s[i:]):
                if token_class not in (CHAR_SET, CHAR_MULTI_SET, OPEN_SET, CLOSE_SET):
                    yield token, token_class
                else:
                    break
            skip = True


def parse_regex_char_set(s, current_filter=all_chars):
    '''
    Given a regex character set, which may look something like:

    [[:Latin:][:Greek:] & [:Ll:]]
    [A-Za-z_]
    [ $lowerVowel $upperVowel ]

    Parse into a single, flat character set without the unicode properties,
    ranges, unions/intersections, etc.
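    For example (illustrative): [A-Za-z_] expands to the individual characters
    A-Z, a-z and '_' (intersected with current_filter and with control
    characters removed), and [[:Latin:] & [:Ll:]] expands to the lowercase
    Latin-script characters.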
    '''
    s = s[1:-1]

    is_negation = False
    this_group = set()
    is_intersection = False
    is_difference = False
    is_word_boundary = False

    real_chars = set()

    for token, token_class in parse_balanced_sets(s):
        if token_class == CHAR_RANGE:
            this_char_set = set(parse_regex_char_range(token))
            this_group |= this_char_set
        elif token_class == ESCAPED_CHARACTER:
            token = token.strip('\\')
            this_group.add(token)
            real_chars.add(token)
        elif token_class == SINGLE_QUOTE:
            t = "'"
            this_group.add(t)
            real_chars.add(t)
        elif token_class == QUOTED_STRING:
            t = token.strip("'")
            this_group.add(t)
            real_chars.add(t)
        elif token_class == NEGATION:
            is_negation = True
        elif token_class in (CHAR_CLASS, CHAR_CLASS_PCRE):
            this_group |= set(parse_regex_char_class(token, current_filter=current_filter))
        elif token_class in (CHAR_SET, CHAR_MULTI_SET):
            # Recursive calls, as performance doesn't matter here and nesting is shallow
            this_char_set = set(parse_regex_char_set(token, current_filter=current_filter))
            if is_intersection:
                this_group &= this_char_set
                is_intersection = False
            elif is_difference:
                this_group -= this_char_set
                is_difference = False
            else:
                this_group |= this_char_set
        elif token_class == INTERSECTION:
            is_intersection = True
        elif token_class == DIFFERENCE:
            is_difference = True
        elif token_class == CHARACTER and token not in control_chars:
            this_group.add(token)
            real_chars.add(token)
        elif token_class in (UNICODE_CHARACTER, UNICODE_WIDE_CHARACTER):
            token = token.decode('unicode-escape')
            if token not in control_chars:
                this_group.add(token)
                real_chars.add(token)
        elif token_class == WIDE_CHARACTER:
            if token not in control_chars:
                this_group.add(token)
                real_chars.add(token)
        elif token_class == BRACKETED_CHARACTER:
            if token.strip('{{}}') not in control_chars:
                this_group.add(token)
                real_chars.add(token)
        elif token_class == WORD_BOUNDARY:
            is_word_boundary = True

    if is_negation:
        this_group = current_filter - this_group

    return sorted((this_group & (current_filter | real_chars)) - control_chars) + ([WORD_BOUNDARY_CHAR] if is_word_boundary else [])


for name, regex_range in unicode_property_regexes:
    unicode_properties[name] = parse_regex_char_set(regex_range)

init_unicode_categories()

hangul_jamo_latin_filter = set(parse_regex_char_set("[['ᄀ-하-ᅵᆨ-ᇂ가-힣ㄱ-ㄿㅁ-ㅃㅅ-ㅣ㈀-㈜㉠-㉻가-힣'ᄀ-ᆵᄆ-ᄈᄉ-하-ᅦᅧ-ᅬᅭ-ᅲᅳ-][:Latin:]]"))

custom_filters = {
    'conjoiningjamo-latin': hangul_jamo_latin_filter,
}


def get_source_and_target(name):
    name = TRANSLITERATOR_ALIASES.get(name.lower(), name.lower())
    components = name.split('-')[:2]
    if len(components) < 2:
        raise Exception(name)

    return components
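# For example (illustrative): get_source_and_target('Latin-Katakana') returns
# ['latin', 'katakana']; a transform name with fewer than two components raises.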
def is_internal(xml):
    return xml.xpath('//transform/@visibility="internal"')


def split_rule(rule):
    splits = []
    current_token = []
    in_set = False
    in_group = False
    open_brackets = 0

    for token, token_type in rule_scanner.scan(rule):
        if token_type == ESCAPED_CHARACTER:
            current_token.append(token)
        elif token_type == OPEN_SET:
            in_set = True
            open_brackets += 1
            current_token.append(token)
        elif token_type == CLOSE_SET:
            open_brackets -= 1
            current_token.append(token)
            if open_brackets == 0:
                in_set = False
        elif token_type == END and not in_set:
            current_token.append(token)
            splits.append(u''.join(current_token).strip())
            current_token = []
        else:
            current_token.append(token)

    return splits


def get_raw_rules_and_variables(xml, reverse=False):
    '''
    Parse tRule nodes from the transform XML

    At this point we only care about lvalue, op and rvalue
    for parsing forward and two-way transforms.

    Variables are collected in a dictionary in this pass so they
    can be substituted later
    '''
    rules = []
    variables = {}

    in_compound_rule = False
    compound_rule = []

    nodes = xml.xpath('*//tRule')
    lines = [l for n in nodes for l in (newline_regex.split(n.text) if n.text else [])]
    if reverse:
        lines = reversed(lines)
    queue = deque(lines)

    while queue:
        rule = queue.popleft()
        rule = safe_decode(comment_regex.split(rule)[0].strip())

        splits = split_rule(rule)
        if len(splits) > 1:
            for r in splits[1:]:
                queue.appendleft(r)

        if rule.strip() not in rule_map:
            rule = literal_space_regex.sub(replace_literal_space, rule)
            rule = rule.rstrip(END_CHAR).strip()
        else:
            rule = rule_map[rule.strip()]

        if rule.strip().endswith('\\'):
            compound_rule.append(rule.rstrip('\\'))
            in_compound_rule = True
            continue
        elif in_compound_rule:
            compound_rule.append(rule)
            rule = u''.join(compound_rule)
            in_compound_rule = False
            compound_rule = []

        assignment = assignment_regex.match(rule)
        transform = transform_regex.match(rule)
        pre_transform = pre_transform_full_regex.match(rule)

        if pre_transform:
            rules.append((PRE_TRANSFORM, pre_transform.group(1)))
        elif assignment:
            lvalue, rvalue = assignment.groups()
            var_name = lvalue.strip().lstrip('$')
            rvalue = rvalue.strip()
            variables[var_name] = rvalue
        elif transform:
            lvalue, op, rvalue = transform.groups()
            lvalue = lvalue.strip()
            rvalue = rvalue.strip()

            if op in FORWARD_TRANSFORM_OPS:
                rules.append((FORWARD_TRANSFORM, (lvalue, rvalue)))
            elif op in BIDIRECTIONAL_TRANSFORM_OPS:
                rules.append((BIDIRECTIONAL_TRANSFORM, (lvalue, rvalue)))
            elif op in BACKWARD_TRANSFORM_OPS:
                rules.append((BACKWARD_TRANSFORM, (lvalue, rvalue)))

    return rules, variables


CHAR_CLASSES = set([
    ESCAPED_CHARACTER,
    CHAR_CLASS,
    QUOTED_STRING,
    CHARACTER,
    GROUP_REF,
])


def char_permutations(s, current_filter=all_chars, reverse=False):
    '''
    char_permutations

    Parses the lvalue or rvalue of a transform rule into a list of
    character permutations, in addition to keeping track of revisits
    and regex groups
    '''

    if not s:
        return deque([EMPTY_TRANSITION_CHAR]), deque([]), []

    char_types = deque()
    add_char_type = deque.append if not reverse else deque.appendleft
    last_index = -1 if not reverse else 0

    revisit_char_types = deque()
    in_revisit = False

    in_group = False
    last_token_group_start = False
    start_group = 0
    end_group = 0

    open_brackets = 0
    current_set = []

    current_chars = char_types

    groups = []

    for token, token_type in transform_scanner.scan(s):
        if open_brackets > 0 and token_type not in (OPEN_SET, CLOSE_SET):
            current_set.append(token)
            continue

        if token_type == ESCAPED_CHARACTER:
            add_char_type(current_chars, [token.strip('\\')])
        elif token_type == OPEN_GROUP:
            in_group = True
            last_token_group_start = True
        elif token_type == CLOSE_GROUP:
            in_group = False
            end_group = len([c for c in current_chars if c[0] != REPEAT_CHAR])
            groups.append((start_group, end_group))
        elif token_type == OPEN_SET:
            open_brackets += 1
            current_set.append(token)
        elif token_type == CLOSE_SET:
            open_brackets -= 1
            current_set.append(token)
            if open_brackets == 0:
                char_set = parse_regex_char_set(u''.join(current_set), current_filter=current_filter)
                if char_set:
                    add_char_type(current_chars, char_set)
                current_set = []
        elif token_type == QUOTED_STRING:
            token = token.strip("'")
            for c in token:
                add_char_type(current_chars, [c])
        elif token_type == GROUP_REF:
            add_char_type(current_chars, [token.replace('$', GROUP_INDICATOR_CHAR)])
        elif token_type == REVISIT:
            in_revisit = True
            current_chars = revisit_char_types
        elif token_type == REPEAT:
            current_chars[last_index].append(EMPTY_TRANSITION_CHAR)
            if not reverse:
                add_char_type(current_chars, [REPEAT_CHAR])
            else:
                prev = current_chars.popleft()
                add_char_type(current_chars, [REPEAT_CHAR])
                add_char_type(current_chars, prev)
        elif token_type == REPEAT_ONE:
            if not reverse:
                add_char_type(current_chars, [REPEAT_CHAR])
            else:
                prev = current_chars.popleft()
                add_char_type(current_chars, [REPEAT_CHAR])
                add_char_type(current_chars, prev)
        elif token_type == OPTIONAL:
            current_chars[last_index].append(EMPTY_TRANSITION_CHAR)
        elif token_type == HTML_ENTITY:
            add_char_type(current_chars, [replace_html_entity(token)])
        elif token_type == CHARACTER:
            add_char_type(current_chars, [token])
        elif token_type == SINGLE_QUOTE:
            add_char_type(current_chars, ["'"])
        elif token_type in (UNICODE_CHARACTER, UNICODE_WIDE_CHARACTER):
            token = token.decode('unicode-escape')
            add_char_type(current_chars, [token])
        elif token_type == WIDE_CHARACTER:
            add_char_type(current_chars, [token])

        if in_group and last_token_group_start:
            start_group = len(current_chars)
            last_token_group_start = False

    return char_types, revisit_char_types, groups


string_replacements = {
    u'[': u'\[',
    u']': u'\]',
    u'(': u'\(',
    u')': u'\)',
    u'{': u'\{',
    u'}': u'\}',
    u'$': u'\$',
    u'^': u'\^',
    u'-': u'\-',
    u'\\': u'\\\\',
    u'*': u'\*',
    u'+': u'\+',
}

escape_sequence_long_regex = re.compile(r'(\\x[0-9a-f]{2})([0-9a-f])', re.I)


def replace_long_escape_sequence(s):
    def replace_match(m):
        return u'{}""{}'.format(m.group(1), m.group(2))

    return escape_sequence_long_regex.sub(replace_match, s)


def quote_string(s):
    return u'"{}"'.format(replace_long_escape_sequence(safe_decode(s).replace('"', '\\"')))


def char_types_string(char_types, escape=True):
    '''
    Transforms the char_permutations output into a string suitable
    for simple parsing in C (characters and character sets only,
    no variables, unicode character properties or unions/intersections)
    '''
    ret = []

    for chars in char_types:
        template = u'{}' if len(chars) == 1 else u'[{}]'
        norm = []
        for c in chars:
            if escape:
                c = string_replacements.get(c, c)
            norm.append(c)

        ret.append(template.format(u''.join(norm)))

    return u''.join(ret)


def format_groups(char_types, groups):
    group_regex = []
    last_end = 0
    for start, end in groups:
        group_regex.append(char_types_string(char_types[last_end:start]))
        group_regex.append(u'(')
        group_regex.append(char_types_string(char_types[start:end]))
        group_regex.append(u')')
        last_end = end

    group_regex.append(char_types_string(char_types[last_end:]))

    return u''.join(group_regex)
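# Illustrative examples: char_types_string([[u'a'], [u'b', u'c']]) yields u'a[bc]'
# (single-character positions are emitted as-is, multi-character positions become
# bracketed sets), and format_groups([[u'a'], [u'b', u'c']], [(1, 2)]) wraps the
# second position in parentheses, yielding u'a([bc])'.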
charset_regex = re.compile(r'(?

transliteration_rule_source_t rules_source[] = {{
    {all_rules}
}};

transliteration_step_source_t steps_source[] = {{
    {all_steps}
}};

transliterator_source_t transliterators_source[] = {{
    {all_transforms}
}};
'''

transliterator_script_data_template = u'''
#include "unicode_scripts.h"

typedef struct script_transliteration_rule {{
    script_language_t script_language;
    transliterator_index_t index;
}} script_transliteration_rule_t;

script_transliteration_rule_t script_transliteration_rules[] = {{
    {rules}
}};

char *script_transliterators[] = {{
    {transliterators}
}};
'''

script_transliterators = {
    'arabic': {None: ['arabic-latin', 'arabic-latin-bgn'],
               'fa': ['persian-latin-bgn'],
               'ps': ['pashto-latin-bgn'],
               },
    'armenian': {None: ['armenian-latin-bgn']},
    'balinese': None,
    'bamum': None,
    'batak': None,
    'bengali': {None: ['bengali-latin']},
    'bopomofo': None,
    'braille': None,
    'buginese': None,
    'buhid': None,
    'canadian_aboriginal': {None: ['canadianaboriginal-latin']},
    'cham': None,
    'cherokee': None,
    'common': {None: [LATIN_ASCII],
               'de': [GERMAN_ASCII],
               'et': [GERMAN_ASCII],
               'da': [SCANDINAVIAN_ASCII, LATIN_ASCII],
               'nb': [SCANDINAVIAN_ASCII, LATIN_ASCII],
               'sv': [SCANDINAVIAN_ASCII, LATIN_ASCII],
               },
    'coptic': None,
    'cyrillic': {None: ['cyrillic-latin'],
                 'be': ['belarusian-latin-bgn'],
                 'ru': ['russian-latin-bgn'],
                 'bg': ['bulgarian-latin-bgn'],
                 'kk': ['kazakh-latin-bgn'],
                 'ky': ['kirghiz-latin-bgn'],
                 'mk': ['macedonian-latin-bgn'],
                 'mn': ['mongolian-latin-bgn'],
                 'sr': ['serbian-latin-bgn'],
                 'uk': ['ukrainian-latin-bgn'],
                 'uz': ['uzbek-latin-bgn'],
                 },
    'devanagari': {None: ['devanagari-latin']},
    'ethiopic': None,
    'georgian': {None: ['georgian-latin', 'georgian-latin-bgn']},
    'glagolitic': None,
    'greek': {None: ['greek-latin', 'greek-latin-bgn', 'greek-latin-ungegn']},
    'gujarati': {None: ['gujarati-latin']},
    'gurmukhi': {None: ['gurmukhi-latin']},
    'han': {None: ['han-latin']},
    'hangul': {None: ['hangul-latin']},
    'hanunoo': None,
    'hebrew': {None: ['hebrew-latin', 'hebrew-latin-bgn']},
    'hiragana': {None: ['hiragana-latin']},
    'inherited': None,
    'javanese': None,
    'kannada': {None: ['kannada-latin']},
    'katakana': {None: ['katakana-latin', 'katakana-latin-bgn']},
    'kayah_li': None,
    'khmer': None,
    'lao': None,
    'latin': {None: [LATIN_ASCII],
              'de': [GERMAN_ASCII],
              'et': [GERMAN_ASCII],
              'da': [SCANDINAVIAN_ASCII, LATIN_ASCII],
              'nb': [SCANDINAVIAN_ASCII, LATIN_ASCII],
              'sv': [SCANDINAVIAN_ASCII, LATIN_ASCII],
              },
    'lepcha': None,
    'limbu': None,
    'lisu': None,
    'malayalam': {None: ['malayalam-latin']},
    'mandaic': None,
    'meetei_mayek': None,
    'mongolian': None,
    'myanmar': None,
    'new_tai_lue': None,
    'nko': None,
    'ogham': None,
    'ol_chiki': None,
    'oriya': {None: ['oriya-latin']},
    'phags_pa': None,
    'rejang': None,
    'runic': None,
    'samaritan': None,
    'saurashtra': None,
    'sinhala': None,
    'sundanese': None,
    'syloti_nagri': None,
    'syriac': None,
    'tagalog': None,
    'tagbanwa': None,
    'tai_le': None,
    'tai_tham': None,
    'tai_viet': None,
    'tamil': {None: ['tamil-latin']},
    'telugu': {None: ['telugu-latin']},
    'thaana': {None: ['thaana-latin', 'maldivian-latin-bgn']},
    'thai': {None: ['thai-latin']},
    'tibetan': None,
    'tifinagh': None,
    'unknown': None,
    'vai': None,
    'yi': None
}
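# Each rule generated by write_transliterator_scripts_file (below) pairs a script
# (and optional language) with a (start, length) slice into the flat
# script_transliterators array. Purely illustrative shape of one generated entry:
#
#   {{SCRIPT_CYRILLIC, "ru"}, {5, 1}}
#
# i.e. for Cyrillic text tagged as Russian, use the single transliterator stored
# at index 5 of the array (actual indices depend on iteration order).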
def write_transliterator_scripts_file(filename):
    transliterator_rule_template = '''{{{{{script_type}, {lang}}}, {{{start}, {length}}}}}'''

    rules = []
    all_transliterators = []

    index = 0

    for script, i in unicode_script_ids.iteritems():
        spec = script_transliterators.get(script.lower())
        if not spec:
            continue

        script_type = 'SCRIPT_{}'.format(script.upper())

        for lang, transliterators in spec.iteritems():
            lang = '""' if not lang else quote_string(lang)
            num_transliterators = len(transliterators)

            rules.append(transliterator_rule_template.format(script_type=script_type,
                                                             lang=lang,
                                                             start=index,
                                                             length=num_transliterators))
            for trans in transliterators:
                all_transliterators.append(quote_string(trans))

            index += num_transliterators

    template = transliterator_script_data_template.format(rules=''', '''.join(rules), transliterators=''', '''.join(all_transliterators))

    f = open(filename, 'w')
    f.write(safe_encode(template))


def write_transliteration_data_file(filename):
    transforms, steps, rules = get_all_transform_rules()

    all_transforms = u''', '''.join([u'{{{}}}'.format(u','.join(t)) for t in transforms])
    all_steps = u''', '''.join([u'{{{}}}'.format(u','.join(s)) for s in steps])

    for r in rules:
        try:
            r = u','.join(r)
        except Exception:
            print('Exception in rule')
            print(r)

    all_rules = u''', '''.join([u'{{{}}}'.format(u','.join(r)) for r in rules])

    template = transliteration_data_template.format(
        all_transforms=all_transforms,
        all_steps=all_steps,
        all_rules=all_rules,
    )

    f = open(filename, 'w')
    f.write(safe_encode(template))


SRC_DIR = os.path.join(this_dir, os.pardir, os.pardir, os.pardir, 'src')

TRANSLITERATION_DATA_FILENAME = 'transliteration_data.c'
TRANSLITERATION_SCRIPTS_FILENAME = 'transliteration_scripts_data.c'


def main(out_dir=SRC_DIR):
    write_transliteration_data_file(os.path.join(out_dir, TRANSLITERATION_DATA_FILENAME))
    write_transliterator_scripts_file(os.path.join(out_dir, TRANSLITERATION_SCRIPTS_FILENAME))


if __name__ == '__main__':
    if len(sys.argv) > 1:
        main(sys.argv[1])
    else:
        main()
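# Example invocation (illustrative):
#
#   python transliteration.py /path/to/libpostal/src
#
# writes transliteration_data.c and transliteration_scripts_data.c into the
# given directory; with no argument the files go to the default SRC_DIR.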