np
app_utils.py  CHANGED  (+4 -8)
@@ -33,9 +33,9 @@ from wordcloud import WordCloud
 import base64
 import time
 
-
-#
-
+stanford_ner_jar = open('stanform-ner.jar','rb')
+# Path to the pre-trained NER model file
+stanford_ner_model =open('english.all.3class.distsim.crf.ser.gz','rb')
 
 timestr = time.strftime("%Y%m%d-%H%M%S")
 
@@ -113,11 +113,7 @@ def nlp_analysis(text):
 
 
 def find_entities(text):
-
-    stanford_ner_model_path = 'english.all.3class.distsim.crf.ser.gz'
-    with open(stanford_ner_jar_path, 'rb') as stanford_ner_jar:
-        with open(stanford_ner_model_path, 'rb') as stanford_ner_model:
-            stan = StanfordNERTagger(stanford_ner_model_path, stanford_ner_jar_path)
+    stan = StanfordNERTagger(stanford_ner_model_path, stanford_ner_jar_path)
     text=text.replace("\n\n","\n")
     tokens = nltk.word_tokenize(text)
     tagged_tokens = stan.tag(tokens)
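For context, a minimal sketch of how the two halves of this change could fit together, assuming NLTK's StanfordNERTagger is the tagger in use. Its constructor takes file paths as strings (model first, then jar), not open file handles, so the open(...) calls added at module level are not what the tagger consumes; note also that find_entities still references stanford_ner_model_path and stanford_ner_jar_path, which are not defined in the hunks shown (they may exist elsewhere in the file). The path values below are placeholders, not the repository's actual configuration.

import nltk
from nltk.tag import StanfordNERTagger

# Placeholder paths: the Stanford NER jar and the pre-trained 3-class English
# model must both exist locally, and a Java runtime must be available.
stanford_ner_jar_path = 'stanford-ner.jar'
stanford_ner_model_path = 'english.all.3class.distsim.crf.ser.gz'

def find_entities(text):
    # Build the tagger from the model and jar paths (strings, not file objects).
    stan = StanfordNERTagger(stanford_ner_model_path, stanford_ner_jar_path)
    text = text.replace("\n\n", "\n")
    tokens = nltk.word_tokenize(text)
    # Returns (token, label) pairs such as ('London', 'LOCATION') or ('Obama', 'PERSON').
    tagged_tokens = stan.tag(tokens)
    return tagged_tokens

Constructing the tagger (or at least resolving the paths) once at module level, rather than on every call to find_entities, avoids repeating the setup per request, which appears to be the direction this commit is moving in.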