Spaces:
Runtime error
Runtime error
feat: Add initial model for background summarizer
Browse files- util/__pycache__/summarizer.cpython-39.pyc +0 -0
- util/summarizer.py +63 -6
- util/textproc.py +26 -0
util/__pycache__/summarizer.cpython-39.pyc
CHANGED
Binary files a/util/__pycache__/summarizer.cpython-39.pyc and b/util/__pycache__/summarizer.cpython-39.pyc differ
|
|
util/summarizer.py
CHANGED
@@ -1,17 +1,74 @@
|
|
|
|
|
|
|
|
1 |
|
2 |
# TODO: add pre-trained summarizer models
|
3 |
# Placeholder text for testing input
|
4 |
test_text = """
|
5 |
Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.
|
6 |
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
7 |
|
8 |
-
def generate_abs_summary(abstract, min_char_abs):
|
9 |
-
return "Abstract" + test_text
|
10 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
|
12 |
-
|
13 |
-
return "Background" + test_text
|
14 |
|
15 |
|
16 |
-
def generate_claims_summary(claims, min_char_claims):
|
17 |
-
|
|
|
import re

from transformers import BigBirdPegasusForConditionalGeneration, AutoTokenizer

from util import textproc

# by default encoder-attention is `block_sparse` with num_random_blocks=3, block_size=64
|
# TODO: add pre-trained summarizer models

# Placeholder text for testing input
test_text = """
Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.
"""

# Section names a caller may request summaries for.
summary_options = ["Abstract", "Background", "Claims"]
def get_word_index(s, idx):
    """Return the character offset in ``s`` where word number ``idx`` starts.

    Words are runs of non-whitespace; the offset points at the first
    non-whitespace character of word ``idx``. If ``s`` contains ``idx`` or
    fewer words, return ``len(s)`` so callers slicing ``s[:result]`` simply
    keep the whole string (the original raised IndexError in that case,
    e.g. for any text shorter than the word cap).
    """
    # Each match keeps its surrounding whitespace so the summed lengths
    # are true character offsets into the original string.
    words = re.findall(r'\s*\S+\s*', s)
    if idx >= len(words):
        return len(s)
    # Offset of chunk idx, plus that chunk's leading whitespace.
    return sum(map(len, words[:idx])) + len(words[idx]) - len(words[idx].lstrip())
|
class PatentSummarizer():
    """Generates summaries for the sections of a patent document.

    Only the background summary currently runs the BigBird-Pegasus model;
    the abstract and claims summaries return placeholder text (see TODOs).

    Fixes applied (the deployed version hit a runtime error):
    - ``pipeline`` lacked ``self`` and called an undefined name
      ``summarizer.generate_*``; the generators are methods, so they are
      dispatched through ``self``.
    - ``generate_bg_summary`` reads ``self.*`` and therefore must be a
      bound method taking ``self``.
    """

    def __init__(self, base_model_name="google/bigbird-pegasus-large-bigpatent"):
        # Possible to tweak other summaries with different models in the future
        self.model = dict()
        self.tokenizer = dict()

        # Downloads weights on first construction (network I/O).
        self.base_model = BigBirdPegasusForConditionalGeneration.from_pretrained(base_model_name)
        self.base_tokenizer = AutoTokenizer.from_pretrained(base_model_name)

        # Hard cap on the number of words fed to the model per call.
        self.max_word_input = 1000

    def pipeline(self, patent_information, summaries_generated, min_char_abs, min_char_bg, min_char_claims):
        """Return ``[abstract, background, claims]`` summaries.

        Each slot is ``None`` when that section name is absent from
        ``summaries_generated`` (see module-level ``summary_options``).
        """
        # TODO: add checker if valid patent info, return None if invalid
        # TODO: add scraper to get document

        # TODO: add parser to get the following info from the base document:
        # NOTE(review): until the parser exists these stay None, and a
        # requested "Background" summary will fail on a None input — confirm
        # intended behavior once parsing lands.
        abstract, background, claims = None, None, None

        summaries = list()
        if "Abstract" in summaries_generated:
            summaries.append(self.generate_abs_summary(abstract, min_char_abs))
        else:
            summaries.append(None)

        if "Background" in summaries_generated:
            summaries.append(self.generate_bg_summary(background, min_char_bg))
        else:
            summaries.append(None)

        if "Claims" in summaries_generated:
            summaries.append(self.generate_claims_summary(claims, min_char_claims))
        else:
            summaries.append(None)

        return summaries

    def generate_abs_summary(self, abstract, min_char_abs):
        # TODO: replace placeholder with a real abstract summarizer.
        return "Abstract" + test_text

    def generate_bg_summary(self, background, min_char_bg):
        """Summarize the background section with the base BigBird model."""
        # Truncate to the first max_word_input words before tokenizing.
        stop_idx = get_word_index(background, self.max_word_input)
        inputs = self.base_tokenizer(background[0:stop_idx],
                                     return_tensors='pt')
        prediction = self.base_model.generate(**inputs)
        bg_summary = self.base_tokenizer.batch_decode(prediction)
        bg_summary = textproc.clean_text(bg_summary[0])
        return bg_summary

    def generate_claims_summary(self, claims, min_char_claims):
        # TODO: replace placeholder with a real claims summarizer.
        return "Claims" + test_text
util/textproc.py
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import re
import unicodedata  # was missing — clean_text uses unicodedata.category (NameError)


def clean_text(text):
    """Normalize raw HTML-ish model/scraper output into plain prose.

    Strips tags and parenthesized asides, removes control characters,
    collapses whitespace, and smooths punctuation artifacts.
    """
    # html pre-proc: drop tags like <p>, </b>
    cleaned = re.sub(r'<.*?>', '', text)
    # drop parenthesized asides, e.g. "(FIG. 1)"
    cleaned = re.sub(r'\([^)]*\)', '', cleaned)
    # NOTE(review): this replaces a letter AND the following capitals with a
    # bare '.', discarding both captured groups — possibly intended
    # r'\1. \2' to split fused sentences. Behavior kept as-is; confirm.
    cleaned = re.sub(r"(\w)([A-Z]+)", r'.', cleaned)
    cleaned = cleaned.strip()
    # remove control/format characters (Unicode category "C*"), e.g. \n, \t
    cleaned = "".join(ch for ch in cleaned if unicodedata.category(ch)[0] != "C")
    cleaned = re.sub(' +', ' ', cleaned)  # collapse runs of spaces
    # punctuation smoothing
    cleaned = cleaned.replace(";", ", and")
    cleaned = cleaned.replace(":", "")
    cleaned = cleaned.replace(" .", ".")
    cleaned = cleaned.replace(" ,", ",")
    cleaned = cleaned.replace("\xa0", " ")
    cleaned = cleaned.lstrip('0123456789.- ')  # remove nums at start
    cleaned = re.sub(r'\b(\w+)( \1\b)+', r'\1', cleaned)  # remove repeated consecutive words
    return cleaned