Update app.py
app.py CHANGED
@@ -14,8 +14,9 @@ pad_id = tokenizer._convert_token_to_id_with_added_voc("<pad>")
 CREOLE = {"Hawaiian Pidgin": "hwc", "Saint Lucian Creole": "acf", "Belizean Creole": "bzj", "Chavacano Creole": "cbk", "Seychellois Creole": "crs", "Sranan Tongo": "srn", "Aukan": "djk", "Gullah": "gul", "San Andrés–Providencia Creole": "icr", "Jamaican Creole": "jam", "Mauritian Creole": "mfe", "Papiamento": "pap", "Pijin": "pis", "Tok Pisin": "tpi", "Torres Strait Creole": "tcs", "Australian Kriol": "rop", "Sango": "sag", "Saramaccan": "srm", "Bislama": "bis", "Nigerian": "pcm", "Sierra Leonean Creole": "kri", "Haitian Creole": "hat", "Kupang Malay": "mkn", "Tetun Dili": "tdt", "Malay Baba": "mbf", "Kituba": "ktu"}


-def generate(input,
-
+def generate(input, slang, tlang):
+    slang = CREOLE[slang]
+    tlang = CREOLE[tlang]
     inp = tokenizer(input.strip() + " </s> <2" + slang + ">",
                     add_special_tokens=False, return_tensors="pt", padding=True).input_ids
     model_output = model.generate(inp, use_cache=True, num_beams=1, max_length=100, min_length=1, early_stopping=True, pad_token_id=pad_id,
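For context, the hunk ends mid-call, so the decoding step and the surrounding setup are not visible in this diff. Below is a minimal sketch of how the full helper might fit together, assuming an IndicBART/CreoleM2M-style setup (slow tokenizer with "<2xxx>" language tags and a seq2seq checkpoint). The checkpoint id, the tokenizer/model loading, the decoder_start_token_id handling, and the final decode are assumptions for illustration, not taken from the Space's code.

# Sketch only: the checkpoint id and everything outside the diff hunk are assumed, not shown in the commit.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

MODEL_NAME = "your-creole-m2m-checkpoint"  # placeholder: the Space's actual model id is not visible in this diff

# use_fast=False gives a slow tokenizer, which is what exposes _convert_token_to_id_with_added_voc.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, do_lower_case=False, use_fast=False, keep_accents=True)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)

pad_id = tokenizer._convert_token_to_id_with_added_voc("<pad>")
bos_id = tokenizer._convert_token_to_id_with_added_voc("<s>")
eos_id = tokenizer._convert_token_to_id_with_added_voc("</s>")

def generate(input, slang, tlang):
    # Map the display names used in the UI (e.g. "Haitian Creole") to short codes (e.g. "hat").
    slang = CREOLE[slang]
    tlang = CREOLE[tlang]
    # Source sentence is suffixed with "</s> <2src>", mirroring the input format in the diff above.
    inp = tokenizer(input.strip() + " </s> <2" + slang + ">",
                    add_special_tokens=False, return_tensors="pt", padding=True).input_ids
    # Force decoding to start from the target-language tag "<2tgt>" (assumed; the diff cuts off here).
    model_output = model.generate(
        inp, use_cache=True, num_beams=1, max_length=100, min_length=1,
        early_stopping=True, pad_token_id=pad_id, bos_token_id=bos_id, eos_token_id=eos_id,
        decoder_start_token_id=tokenizer._convert_token_to_id_with_added_voc("<2" + tlang + ">"))
    # Decode the generated ids and strip special tokens to get the translated text.
    return tokenizer.decode(model_output[0], skip_special_tokens=True,
                            clean_up_tokenization_spaces=False)

With this shape, the commit's change is just plumbing: generate() now takes the source and target language names from the UI, looks up their codes in CREOLE, and uses them to build the "<2xxx>" tags instead of relying on hard-coded values.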