DreamStream-1 committed
Commit 2080e4d
1 Parent(s): ebdf65e

Update app.py

Files changed (1)
  1. app.py +152 -128
app.py CHANGED
@@ -3,14 +3,26 @@ import re
  from datetime import datetime
  import PyPDF2
  import torch
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
  from sentence_transformers import SentenceTransformer, util
- from groq import Groq
  import gradio as gr

- # Set your API key for Groq
- os.environ["GROQ_API_KEY"] = "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"
- client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

  # --- PDF/Text Extraction Functions --- #
  def extract_text_from_file(file_path):
@@ -23,39 +35,45 @@ def extract_text_from_file(file_path):
      raise ValueError("Unsupported file type. Only PDF and TXT files are accepted.")

  def extract_text_from_pdf(pdf_file_path):
-     """Extracts text from a PDF file."""
      with open(pdf_file_path, 'rb') as pdf_file:
          pdf_reader = PyPDF2.PdfReader(pdf_file)
-         text = ''.join(page.extract_text() for page in pdf_reader.pages if page.extract_text())
-         return text

  def extract_text_from_txt(txt_file_path):
      """Extracts text from a .txt file."""
      with open(txt_file_path, 'r', encoding='utf-8') as txt_file:
          return txt_file.read()

- # --- Skill Extraction with Llama Model --- #
- def extract_skills_llama(text):
-     """Extracts skills from the text using the Llama model via Groq API."""
-     try:
-         response = client.chat.completions.create(
-             messages=[{"role": "user", "content": f"Extract skills from the following text: {text}"}],
-             model="llama3-70b-8192",
-         )
-         skills = response.choices[0].message.content.split(', ')  # Expecting a comma-separated list
-         return skills
-     except Exception as e:
-         raise RuntimeError(f"Error during skill extraction: {e}")

  # --- Job Description Processing Function --- #
  def process_job_description(text):
      """Extracts skills or relevant keywords from the job description."""
-     return extract_skills_llama(text)

  # --- Qualification and Experience Extraction --- #
  def extract_qualifications(text):
      """Extracts qualifications from text (e.g., degrees, certifications)."""
-     qualifications = re.findall(r'(bachelor|master|phd|certified|degree)', text, re.IGNORECASE)
      return qualifications if qualifications else ['No specific qualifications found']

  def extract_experience(text):
@@ -65,121 +83,127 @@ def extract_experience(text):
      experience_years = [int(year[0]) for year in experience_years]
      return experience_years, job_titles

- # --- Updated Resume Analysis Function --- #
- def analyze_resume(resume_file, job_description_file):
-     if not resume_file or not job_description_file:
-         return "", "", "", "Please upload both files."
-
-     # Load and preprocess resume and job description
-     resume_text = extract_text_from_file(resume_file)
-     job_description_text = extract_text_from_file(job_description_file)
-
-     # Extract skills, qualifications, and experience from the resume
-     resume_skills = extract_skills_llama(resume_text)
-     resume_qualifications = extract_qualifications(resume_text)
-     resume_experience, _ = extract_experience(resume_text)
-     total_experience = sum(resume_experience)  # Assuming this returns a list of experiences
-
-     # Extract required skills, qualifications, and experience from the job description
-     job_description_skills = process_job_description(job_description_text)
-     job_description_qualifications = extract_qualifications(job_description_text)
-     job_description_experience, _ = extract_experience(job_description_text)
-     required_experience = sum(job_description_experience)  # Assuming total years required
-
-     # Calculate similarity scores
-     skills_similarity = len(set(resume_skills).intersection(set(job_description_skills))) / len(job_description_skills) * 100 if job_description_skills else 0
-     qualifications_similarity = len(set(resume_qualifications).intersection(set(job_description_qualifications))) / len(job_description_qualifications) * 100 if job_description_qualifications else 0
-     experience_similarity = 1.0 if total_experience >= required_experience else 0.0
-
-     # Fit assessment logic
-     fit_score = 0
-     if total_experience >= required_experience:
-         fit_score += 1
-     if skills_similarity > 50:  # Define a threshold for skills match
-         fit_score += 1
-     if qualifications_similarity > 50:  # Define a threshold for qualifications match
-         fit_score += 1
-
-     # Determine fit
-     if fit_score == 3:
-         fit_assessment = "Strong fit"
-     elif fit_score == 2:
-         fit_assessment = "Moderate fit"
      else:
-         fit_assessment = "Not a fit"
-
-     # Prepare output messages for tab display
-     summary_message = (
-         f"### Summary of Analysis\n"
-         f"- **Skills Similarity**: {skills_similarity:.2f}%\n"
-         f"- **Qualifications Similarity**: {qualifications_similarity:.2f}%\n"
-         f"- **Experience Similarity**: {experience_similarity * 100:.2f}%\n"
-         f"- **Candidate Experience**: {total_experience} years\n"
-         f"- **Fit Assessment**: {fit_assessment}\n"
-     )
-
-     skills_message = (
-         f"### Skills Overview\n"
-         f"- **Resume Skills:**\n" + "\n".join(f" - {skill}" for skill in resume_skills) + "\n"
-         f"- **Job Description Skills:**\n" + "\n".join(f" - {skill}" for skill in job_description_skills) + "\n"
-     )
-
-     qualifications_message = (
-         f"### Qualifications Overview\n"
-         f"- **Resume Qualifications:** " + ", ".join(resume_qualifications) + "\n" +
-         f"- **Job Description Qualifications:** " + ", ".join(job_description_qualifications) + "\n"
      )

-     experience_message = (
-         f"### Experience Overview\n"
-         f"- **Total Experience:** {total_experience} years\n"
-         f"- **Required Experience:** {required_experience} years\n"
-     )
-
-     return summary_message, skills_message, qualifications_message, experience_message
-
-
- # --- Gradio Interface --- #
- def run_gradio_interface():
-     with gr.Blocks() as demo:
-         gr.Markdown("## Resume and Job Description Analyzer")
-         resume_file = gr.File(label="Upload Resume")
-         job_description_file = gr.File(label="Upload Job Description")
-
-         # Create placeholders for output messages
-         summary_output = gr.Textbox(label="Summary of Analysis", interactive=False, lines=10)
-         skills_output = gr.Textbox(label="Skills Overview", interactive=False, lines=10)
-         qualifications_output = gr.Textbox(label="Qualifications Overview", interactive=False, lines=10)
-         experience_output = gr.Textbox(label="Experience Overview", interactive=False, lines=10)

-         # Create tabs for output sections
-         with gr.Tab("Analysis Summary"):
-             gr.Markdown("### Summary of Analysis")
-             summary_output  # This automatically renders the output box

-         with gr.Tab("Skills Overview"):
-             gr.Markdown("### Skills Overview")
-             skills_output  # This automatically renders the output box

-         with gr.Tab("Qualifications Overview"):
-             gr.Markdown("### Qualifications Overview")
-             qualifications_output  # This automatically renders the output box

-         with gr.Tab("Experience Overview"):
-             gr.Markdown("### Experience Overview")
-             experience_output  # This automatically renders the output box
-
-         analyze_button = gr.Button("Analyze")

-         # Button action
-         analyze_button.click(analyze, inputs=[resume_file, job_description_file], outputs=[summary_output, skills_output, qualifications_output, experience_output])

-     demo.launch()

- def analyze(resume, job_desc):
-     # Always ensure the correct number of return values
-     summary, skills, qualifications, experience = analyze_resume(resume, job_desc)
-     return summary, skills, qualifications, experience

  if __name__ == "__main__":
-     run_gradio_interface()
 
@@ -3,14 +3,26 @@ import re
  from datetime import datetime
  import PyPDF2
  import torch
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoModelForSeq2SeqLM
  from sentence_transformers import SentenceTransformer, util
  import gradio as gr

+ # --- Model Loading with Caching --- #
+ class ModelCache:
+     _tokenizers = {}
+     _models = {}
+
+     @classmethod
+     def get_model(cls, model_name):
+         if model_name not in cls._models:
+             cls._models[model_name] = AutoModelForSeq2SeqLM.from_pretrained(model_name)
+         return cls._models[model_name]
+
+     @classmethod
+     def get_tokenizer(cls, model_name):
+         if model_name not in cls._tokenizers:
+             cls._tokenizers[model_name] = AutoTokenizer.from_pretrained(model_name)
+         return cls._tokenizers[model_name]
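+ # Repeated calls such as ModelCache.get_tokenizer("google/flan-t5-base") reuse the cached instance instead of reloading the checkpoint each time.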

  # --- PDF/Text Extraction Functions --- #
  def extract_text_from_file(file_path):
 
@@ -23,39 +35,45 @@ def extract_text_from_file(file_path):
      raise ValueError("Unsupported file type. Only PDF and TXT files are accepted.")

  def extract_text_from_pdf(pdf_file_path):
+     """Extracts text from a PDF file with logging for page extraction."""
+     text = []
      with open(pdf_file_path, 'rb') as pdf_file:
          pdf_reader = PyPDF2.PdfReader(pdf_file)
+         for i, page in enumerate(pdf_reader.pages):
+             page_text = page.extract_text()
+             if page_text:
+                 text.append(page_text)
+             else:
+                 print(f"Warning: Page {i} could not be extracted.")
+     return ''.join(text)

  def extract_text_from_txt(txt_file_path):
      """Extracts text from a .txt file."""
      with open(txt_file_path, 'r', encoding='utf-8') as txt_file:
          return txt_file.read()

+ # --- Skill Extraction with Hugging Face --- #
+ def extract_skills_huggingface(text):
+     """Extracts skills from the text using a Hugging Face model."""
+     model_name = "google/flan-t5-base"
+     tokenizer = ModelCache.get_tokenizer(model_name)
+     model = ModelCache.get_model(model_name)
+
+     input_text = f"Extract skills from the following text: {text}"
+     inputs = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True)
+     outputs = model.generate(**inputs)
+     skills = tokenizer.decode(outputs[0], skip_special_tokens=True).split(', ')  # Expecting a comma-separated list
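+     # Note: the model's output is not guaranteed to be comma-separated, so this split may yield a single multi-word string.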
+     return skills

  # --- Job Description Processing Function --- #
  def process_job_description(text):
      """Extracts skills or relevant keywords from the job description."""
+     return extract_skills_huggingface(text)

  # --- Qualification and Experience Extraction --- #
  def extract_qualifications(text):
      """Extracts qualifications from text (e.g., degrees, certifications)."""
+     qualifications = re.findall(r'\b(bachelor|master|phd|certified|degree|diploma|qualification|certification)\b', text, re.IGNORECASE)
      return qualifications if qualifications else ['No specific qualifications found']

  def extract_experience(text):
@@ -65,121 +83,127 @@ def extract_experience(text):
      experience_years = [int(year[0]) for year in experience_years]
      return experience_years, job_titles

+ # --- Semantic Similarity Calculation --- #
+ def calculate_semantic_similarity(text1, text2):
+     """Calculates semantic similarity using a sentence transformer model and returns the score as a percentage."""
+     model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
+     embeddings1 = model.encode(text1, convert_to_tensor=True)
+     embeddings2 = model.encode(text2, convert_to_tensor=True)
+     similarity_score = util.pytorch_cos_sim(embeddings1, embeddings2).item()
+
+     # Convert similarity score to percentage
+     similarity_percentage = similarity_score * 100
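+     # Note: cosine similarity lies in [-1, 1], so this value can fall anywhere from -100 to 100; it is a rough signal rather than a strict 0-100 percentage.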
+     return similarity_percentage
+
+ # --- Thresholds --- #
+ def categorize_similarity(score):
+     """Categorizes the similarity score into thresholds for better insights."""
+     if score >= 80:
+         return "High Match"
+     elif score >= 50:
+         return "Moderate Match"
      else:
+         return "Low Match"
+
+ # --- Communication Generation with Enhanced Response --- #
+ def communication_generator(resume_skills, job_description_skills, skills_similarity, qualifications_similarity, experience_similarity, max_length=200):
+     """Generates a more detailed communication response based on similarity scores."""
+     model_name = "google/flan-t5-base"
+     tokenizer = ModelCache.get_tokenizer(model_name)
+     model = ModelCache.get_model(model_name)
+
+     # Assess candidate fit based on similarity scores
+     fit_status = "strong fit" if skills_similarity >= 80 and qualifications_similarity >= 80 and experience_similarity >= 80 else \
+                  "moderate fit" if skills_similarity >= 50 else "weak fit"
+
+     # Create a detailed communication message based on match levels
+     message = (
+         f"After a detailed analysis of the candidate's resume, we found the following insights:\n\n"
+         f"- **Skills Match**: {skills_similarity:.2f}% ({categorize_similarity(skills_similarity)})\n"
+         f"- **Qualifications Match**: {qualifications_similarity:.2f}% ({categorize_similarity(qualifications_similarity)})\n"
+         f"- **Experience Match**: {experience_similarity:.2f}% ({categorize_similarity(experience_similarity)})\n\n"
+         f"The overall assessment indicates that the candidate is a {fit_status} for the role. "
+         f"Skills such as {', '.join(resume_skills)} align {categorize_similarity(skills_similarity).lower()} with the job's requirements of {', '.join(job_description_skills)}. "
+         f"In terms of qualifications and experience, the candidate shows a {categorize_similarity(qualifications_similarity).lower()} match with the role's needs. "
+         f"Based on these findings, we believe the candidate could potentially excel in the role, "
+         f"but additional evaluation or interviews are recommended for further clarification."
      )

+     inputs = tokenizer(message, return_tensors="pt", padding=True, truncation=True)
+     response = model.generate(**inputs, max_length=max_length, num_beams=4, early_stopping=True)

+     return tokenizer.decode(response[0], skip_special_tokens=True)

+ # --- Sentiment Analysis --- #
+ def sentiment_analysis(text):
+     """Analyzes the sentiment of the text using a Hugging Face model."""
+     model_name = "distilbert-base-uncased-finetuned-sst-2-english"
+     tokenizer = ModelCache.get_tokenizer(model_name)
+     # This checkpoint has a sequence-classification head, so it is loaded with
+     # AutoModelForSequenceClassification rather than through the seq2seq ModelCache.
+     model = AutoModelForSequenceClassification.from_pretrained(model_name)

+     inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
+     with torch.no_grad():
+         outputs = model(**inputs)
+     predicted_sentiment = torch.argmax(outputs.logits).item()
+     return ["Negative", "Positive"][predicted_sentiment]  # SST-2 is a two-label model

+ # --- Updated Resume Analysis Function --- #
+ def analyze_resume(resume_file, job_description_file):
+     """Analyzes the resume and job description, returning similarity score, skills, qualifications, and experience matching."""
+     # Extract resume and job description text
+     try:
+         resume_text = extract_text_from_file(resume_file.name)
+         job_description_text = extract_text_from_file(job_description_file.name)
+     except ValueError as ve:
+         # Repeat the error message so the number of return values matches the interface's outputs.
+         return (str(ve),) * 11

+     # Extract skills, qualifications, and experience
+     resume_skills = extract_skills_huggingface(resume_text)
+     job_description_skills = process_job_description(job_description_text)
+     resume_qualifications = extract_qualifications(resume_text)
+     job_description_qualifications = extract_qualifications(job_description_text)
+     resume_experience, resume_job_titles = extract_experience(resume_text)
+     job_description_experience, job_description_titles = extract_experience(job_description_text)
+
+     # Calculate semantic similarity for different sections in percentages
+     skills_similarity = calculate_semantic_similarity(' '.join(resume_skills), ' '.join(job_description_skills))
+     qualifications_similarity = calculate_semantic_similarity(' '.join(resume_qualifications), ' '.join(job_description_qualifications))
+     experience_similarity = calculate_semantic_similarity(' '.join([str(e) for e in resume_experience]), ' '.join([str(e) for e in job_description_experience]))
+
+     # Generate a communication response based on the similarity percentages
+     communication_response = communication_generator(
+         resume_skills, job_description_skills,
+         skills_similarity, qualifications_similarity, experience_similarity
+     )

+     # Perform Sentiment Analysis
+     sentiment = sentiment_analysis(resume_text)
+
+     # Return the results including thresholds and percentage scores
+     return (
+         f"Skills Similarity: {skills_similarity:.2f}% ({categorize_similarity(skills_similarity)})",
+         f"Qualifications Similarity: {qualifications_similarity:.2f}% ({categorize_similarity(qualifications_similarity)})",
+         f"Experience Similarity: {experience_similarity:.2f}% ({categorize_similarity(experience_similarity)})",
+         communication_response,
+         f"Sentiment Analysis: {sentiment}",
+         f"Resume Skills: {', '.join(resume_skills)}",
+         f"Job Description Skills: {', '.join(job_description_skills)}",
+         f"Resume Qualifications: {', '.join(resume_qualifications)}",
+         f"Job Description Qualifications: {', '.join(job_description_qualifications)}",
+         f"Resume Experience: {', '.join(map(str, resume_experience))} years, Titles: {', '.join(resume_job_titles)}",
+         f"Job Description Experience: {', '.join(map(str, job_description_experience))} years, Titles: {', '.join(job_description_titles)}"
+     )

+ # --- Gradio Interface --- #
+ iface = gr.Interface(
+     fn=analyze_resume,
+     inputs=["file", "file"],
+     outputs=[
+         "text", "text", "text", "text", "text", "text", "text", "text", "text", "text", "text"
+     ],
+     title="Resume Analysis Tool",
+     description="Analyze a resume against a job description to evaluate skills, qualifications, experience, and sentiment."
+ )

  if __name__ == "__main__":
+     iface.launch()
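
For a quick local check of the updated pipeline, something along these lines should work (a rough sketch, assuming this revision of app.py is importable and that the google/flan-t5-base and paraphrase-MiniLM-L6-v2 checkpoints can be downloaded on first use; the sample strings are placeholders):

    from app import extract_skills_huggingface, calculate_semantic_similarity, categorize_similarity

    resume_text = "Python developer with five years of experience building ML pipelines."
    jd_text = "Seeking a Python engineer with machine learning experience."

    resume_skills = extract_skills_huggingface(resume_text)
    jd_skills = extract_skills_huggingface(jd_text)

    score = calculate_semantic_similarity(' '.join(resume_skills), ' '.join(jd_skills))
    print(f"Skills similarity: {score:.2f}% ({categorize_similarity(score)})")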