Spaces:
Runtime error
Runtime error
Upload 2 files
Browse files- assets/i18n/i18n.py +46 -0
- assets/i18n/scan.py +71 -0
assets/i18n/i18n.py
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
from pathlib import Path
|
3 |
+
from locale import getdefaultlocale
|
4 |
+
|
5 |
+
|
6 |
+
class I18nAuto:
    """Minimal i18n helper that maps UI strings through per-locale JSON files.

    Translation files live in ``LANGUAGE_PATH`` and are named
    ``<locale>.json`` (e.g. ``en_US.json``); each file is a flat mapping of
    source string -> translated string.
    """

    LANGUAGE_PATH = "./assets/i18n/languages/"

    def __init__(self, language=None):
        """Select a language and load its translation table.

        Args:
            language: Locale code such as ``"en_US"``. Defaults to the
                system locale reported by ``locale.getdefaultlocale()``.
        """
        language = language or getdefaultlocale()[0]

        # getdefaultlocale() can return (None, None) when no locale is
        # configured; fall back to en_US instead of crashing on None[:2].
        if language is None:
            self.language = "en_US"
        elif self._language_exists(language):
            self.language = language
        else:
            # No exact locale file: accept any available language sharing
            # the two-letter prefix (e.g. pt_PT when pt_BR was requested).
            lang_prefix = language[:2]
            available_languages = self._get_available_languages()
            matching_languages = [
                lang for lang in available_languages if lang.startswith(lang_prefix)
            ]
            self.language = matching_languages[0] if matching_languages else "en_US"

        self.language_map = self._load_language_list()

    def _load_language_list(self):
        """Return the key->translation dict for ``self.language``.

        Raises:
            FileNotFoundError: if the expected .json file is missing.
        """
        try:
            file_path = Path(self.LANGUAGE_PATH) / f"{self.language}.json"
            with open(file_path, "r", encoding="utf-8") as f:
                return json.load(f)
        except FileNotFoundError:
            raise FileNotFoundError(
                f"Failed to load language file for {self.language}. Check if the correct .json file exists."
            )

    def _get_available_languages(self):
        """List the locale codes that have a translation file on disk."""
        return [path.stem for path in Path(self.LANGUAGE_PATH).glob("*.json")]

    def _language_exists(self, language):
        """True when an exact ``<language>.json`` translation file exists."""
        return (Path(self.LANGUAGE_PATH) / f"{language}.json").exists()

    def __call__(self, key):
        """Translate *key*, falling back to the key itself when unmapped."""
        return self.language_map.get(key, key)

    def print(self):
        """Log the language currently in use."""
        print(f"Using Language: {self.language}")
|
assets/i18n/scan.py
ADDED
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import ast
|
2 |
+
import json
|
3 |
+
from pathlib import Path
|
4 |
+
from collections import OrderedDict
|
5 |
+
|
6 |
+
|
7 |
+
def extract_i18n_strings(node):
    """Recursively collect literal string arguments of ``i18n(...)`` calls.

    Walks the AST rooted at *node* depth-first and returns every constant
    string passed positionally to a call of the bare name ``i18n``, in
    source order.
    """
    i18n_strings = []

    if (
        isinstance(node, ast.Call)
        and isinstance(node.func, ast.Name)
        and node.func.id == "i18n"
    ):
        for arg in node.args:
            # ast.Str was deprecated in 3.8 and removed in Python 3.12;
            # string literals are ast.Constant nodes with a str value.
            if isinstance(arg, ast.Constant) and isinstance(arg.value, str):
                i18n_strings.append(arg.value)

    for child_node in ast.iter_child_nodes(node):
        i18n_strings.extend(extract_i18n_strings(child_node))

    return i18n_strings
|
23 |
+
|
24 |
+
|
25 |
+
def process_file(file_path):
    """Extract i18n keys from *file_path* if it references ``I18nAuto``.

    Files that never mention ``I18nAuto`` are skipped cheaply (no parse)
    and yield an empty list.
    """
    # Source files are assumed UTF-8; without an explicit encoding the
    # read would depend on the platform default and break on Windows.
    with open(file_path, "r", encoding="utf-8") as f:
        code = f.read()
        if "I18nAuto" in code:
            tree = ast.parse(code)
            i18n_strings = extract_i18n_strings(tree)
            print(file_path, len(i18n_strings))
            return i18n_strings
    return []
|
34 |
+
|
35 |
+
|
36 |
+
# Collect every i18n("...") key used anywhere in the project's .py sources.
code_keys = set()
for source_path in Path(".").rglob("*.py"):
    code_keys.update(process_file(source_path))

print()
print("Total unique:", len(code_keys))

# Compare the keys found in code against the reference (English) file.
standard_file = "languages/en_US.json"
with open(standard_file, "r", encoding="utf-8") as ref_file:
    standard_data = json.load(ref_file, object_pairs_hook=OrderedDict)
standard_keys = set(standard_data.keys())

unused_keys = standard_keys - code_keys
missing_keys = code_keys - standard_keys

print("Unused keys:", len(unused_keys))
for stale_key in unused_keys:
    print("\t", stale_key)

print("Missing keys:", len(missing_keys))
for absent_key in missing_keys:
    print("\t", absent_key)

# Regenerate the reference file: in en_US every key maps to itself.
code_keys_dict = OrderedDict((key, key) for key in code_keys)

with open(standard_file, "w", encoding="utf-8") as ref_file:
    json.dump(code_keys_dict, ref_file, ensure_ascii=False, indent=4, sort_keys=True)
    ref_file.write("\n")
|