import os
import re
from datetime import datetime
import PyPDF2
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoModelForSeq2SeqLM
from sentence_transformers import SentenceTransformer, util
from groq import Groq
import gradio as gr
from docxtpl import DocxTemplate
# Groq API key: read it from the environment (set GROQ_API_KEY as a secret in the
# Space settings rather than hard-coding it in the source)
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
# --- PDF/Text Extraction Functions --- #
def extract_text_from_file(file_path):
    """Extracts text from PDF or TXT files based on the file extension."""
    if file_path.endswith('.pdf'):
        return extract_text_from_pdf(file_path)
    elif file_path.endswith('.txt'):
        return extract_text_from_txt(file_path)
    else:
        raise ValueError("Unsupported file type. Only PDF and TXT files are accepted.")

def extract_text_from_pdf(pdf_file_path):
    """Extracts text from a PDF file."""
    with open(pdf_file_path, 'rb') as pdf_file:
        pdf_reader = PyPDF2.PdfReader(pdf_file)
        text = ''.join(page.extract_text() for page in pdf_reader.pages if page.extract_text())
    return text

def extract_text_from_txt(txt_file_path):
    """Extracts text from a .txt file."""
    with open(txt_file_path, 'r', encoding='utf-8') as txt_file:
        return txt_file.read()
# --- Skill Extraction with Llama Model --- #
def extract_skills_llama(text):
    """Extracts skills from the text using the Llama model via the Groq API."""
    try:
        response = client.chat.completions.create(
            messages=[{"role": "user", "content": f"Extract skills from the following text: {text}"}],
            model="llama3-70b-8192",
        )
        skills = response.choices[0].message.content.split(', ')  # Expecting a comma-separated list
        return skills
    except Exception as e:
        raise RuntimeError(f"Error during skill extraction: {e}")
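# Optional sketch (not called above): the Llama reply is free-form text, so the ", "
# split can leave bullet markers, newlines, or empty entries. A hypothetical cleanup
# helper like the one below could normalize the list if that becomes a problem.
def _clean_skill_list(raw_skills):
    """Strips whitespace and list markers from LLM-extracted skills and drops empty entries."""
    cleaned = []
    for skill in raw_skills:
        skill = skill.strip().strip('-*').strip()
        if skill:
            cleaned.append(skill)
    return cleaned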
# --- Job Description Processing Function --- #
def process_job_description(text):
    """Extracts skills or relevant keywords from the job description."""
    return extract_skills_llama(text)
# --- Qualification and Experience Extraction --- #
def extract_qualifications(text):
    """Extracts qualifications from text (e.g., degrees, certifications)."""
    qualifications = re.findall(r'(bachelor|master|phd|certified|degree)', text, re.IGNORECASE)
    return qualifications if qualifications else ['No specific qualifications found']

def extract_experience(text):
    """Extracts years of experience and job titles from the text."""
    experience_years = re.findall(r'(\d+)\s*(years|year) of experience', text, re.IGNORECASE)
    job_titles = re.findall(r'\b(software engineer|developer|manager|analyst)\b', text, re.IGNORECASE)
    experience_years = [int(year[0]) for year in experience_years]
    return experience_years, job_titles
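# Note: the two regexes above are deliberately simple. They only catch phrases of the
# form "<N> years of experience" and a fixed set of job titles; resumes that phrase
# experience differently will simply yield empty lists here.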
# --- Summarization Function --- #
def summarize_experience(experience_text):
    """Summarizes the experience text using a pre-trained model."""
    model_name = "facebook/bart-large-cnn"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    inputs = tokenizer(experience_text, return_tensors="pt", max_length=1024, truncation=True)
    summary_ids = model.generate(inputs['input_ids'], max_length=150, min_length=30, length_penalty=2.0, num_beams=4, early_stopping=True)
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    return summary
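# Note: summarize_experience reloads BART on every call, which is slow on a CPU Space.
# One option (a sketch, not wired in) is to load the tokenizer and model once at module
# level, e.g.:
#   _SUMM_TOKENIZER = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
#   _SUMM_MODEL = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large-cnn")
# and reuse those objects inside the function.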
# --- Semantic Similarity Calculation --- #
def calculate_semantic_similarity(text1, text2):
    """Calculates semantic similarity using a sentence transformer model and returns the score as a percentage."""
    model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
    embeddings1 = model.encode(text1, convert_to_tensor=True)
    embeddings2 = model.encode(text2, convert_to_tensor=True)
    similarity_score = util.pytorch_cos_sim(embeddings1, embeddings2).item()
    # Convert similarity score to percentage
    similarity_percentage = similarity_score * 100
    return similarity_percentage
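# Illustrative usage (inputs and output are examples, not measured values):
#   calculate_semantic_similarity("python, sql, docker", "python, postgresql, kubernetes")
# returns a float that the rest of the app treats as a 0-100 percentage.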
# --- Communication Generation with Enhanced Response --- #
def communication_generator(resume_skills, job_description_skills, skills_similarity, qualifications_similarity, experience_similarity, candidate_experience):
    """Generates a detailed communication response based on similarity scores and additional criteria."""
    # Assess candidate fit based on similarity scores
    if skills_similarity >= 80 and qualifications_similarity >= 80 and experience_similarity >= 80:
        fit_status = "strong fit"
    elif skills_similarity >= 50:
        fit_status = "moderate fit"
    else:
        fit_status = "weak fit"

    # Build a recommendation based on the fit assessment
    if fit_status == "strong fit":
        recommendation = "We recommend moving forward with this candidate, as they demonstrate a high level of alignment with the role requirements."
    elif fit_status == "moderate fit":
        recommendation = "This candidate shows potential; however, further assessment or interviews are recommended to clarify their fit for the role."
    else:
        recommendation = "We advise against moving forward with this candidate, as they do not meet the key technical requirements for the position."

    message = (
        f"After a detailed analysis of the candidate's resume, we found the following insights:\n\n"
        f"- **Skills Match**: {skills_similarity:.2f}% (based on required technologies: {', '.join(job_description_skills)})\n"
        f"- **Experience Match**: {experience_similarity:.2f}% (relevant experience: {candidate_experience} years)\n"
        f"- **Qualifications Match**: {qualifications_similarity:.2f}%\n\n"
        f"The overall assessment indicates that the candidate is a {fit_status} for the role. "
        f"Their skills in {', '.join(resume_skills)} align with the job's requirements of {', '.join(job_description_skills)}. "
        f"Based on their experience, particularly with technologies like {', '.join(resume_skills)}, they could contribute effectively to our team.\n\n"
        f"**Recommendation**: {recommendation}\n"
    )
    return message
# --- Sentiment Analysis --- #
def analyze_sentiment(text):
    """Analyzes the sentiment of the text."""
    # Requires a 3-class sentiment classifier whose label order matches the list below
    # (0 = negative, 1 = neutral, 2 = positive)
    model_name = "cardiffnlp/twitter-roberta-base-sentiment"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        outputs = model(**inputs)
    predicted_sentiment = torch.argmax(outputs.logits).item()
    return ["Negative", "Neutral", "Positive"][predicted_sentiment]
# --- Updated Resume Analysis Function --- #
def analyze_resume(resume_file, job_description_file):
    """Analyzes the resume and job description, returning similarity scores, skills, qualifications, and experience matching."""
    # Extract resume and job description text
    try:
        resume_text = extract_text_from_file(resume_file.name)
        job_description_text = extract_text_from_file(job_description_file.name)
    except ValueError as ve:
        # Return a value for every Gradio output component so the UI does not error out
        return str(ve), 0, 0, 0, "", "", "", "", ""

    # Extract skills, qualifications, and experience
    resume_skills = extract_skills_llama(resume_text)
    job_description_skills = process_job_description(job_description_text)
    resume_qualifications = extract_qualifications(resume_text)
    job_description_qualifications = extract_qualifications(job_description_text)
    resume_experience, resume_job_titles = extract_experience(resume_text)
    job_description_experience, job_description_titles = extract_experience(job_description_text)

    # Summarize experiences
    resume_experience_summary = summarize_experience(resume_text)
    job_description_experience_summary = summarize_experience(job_description_text)

    # Calculate semantic similarity for the different sections, as percentages
    skills_similarity = calculate_semantic_similarity(' '.join(resume_skills), ' '.join(job_description_skills))
    qualifications_similarity = calculate_semantic_similarity(' '.join(resume_qualifications), ' '.join(job_description_qualifications))
    experience_similarity = calculate_semantic_similarity(' '.join(str(e) for e in resume_experience), ' '.join(str(e) for e in job_description_experience))

    # Candidate experience is taken as the total of the years mentioned in the resume
    candidate_experience = sum(resume_experience)
    # Required experience as stated in the job description (currently informational only)
    required_experience = sum(job_description_experience)

    # Generate communication based on the analysis
    response_message = communication_generator(
        resume_skills,
        job_description_skills,
        skills_similarity,
        qualifications_similarity,
        experience_similarity,
        candidate_experience,
    )

    # Analyze sentiment for the resume
    sentiment = analyze_sentiment(resume_text)

    # Return values in the same order as the Gradio output components
    return (
        response_message,
        skills_similarity,
        qualifications_similarity,
        experience_similarity,
        sentiment,
        resume_experience_summary,
        job_description_experience_summary,
        ', '.join(resume_skills),
        ', '.join(job_description_skills),
    )
# --- Gradio Interface --- #
def gradio_interface():
    """Defines and runs the Gradio interface."""
    with gr.Blocks() as demo:
        gr.Markdown("# Resume Analyzer")
        with gr.Row():
            resume_file = gr.File(label="Upload Resume (PDF/TXT)")
            job_description_file = gr.File(label="Upload Job Description (PDF/TXT)")
        analyze_button = gr.Button("Analyze")

        with gr.Tab("Results"):
            output_message = gr.Textbox(label="Analysis Message", lines=10)
            skills_similarity_output = gr.Number(label="Skills Similarity (%)")
            qualifications_similarity_output = gr.Number(label="Qualifications Similarity (%)")
            experience_similarity_output = gr.Number(label="Experience Similarity (%)")
            sentiment_output = gr.Textbox(label="Sentiment Analysis")
            resume_summary_output = gr.Textbox(label="Resume Experience Summary", lines=5)
            job_description_summary_output = gr.Textbox(label="Job Description Experience Summary", lines=5)
            resume_skills_output = gr.Textbox(label="Resume Skills", lines=5)
            job_description_skills_output = gr.Textbox(label="Job Description Skills", lines=5)

        # Link the button to the analysis function; the output order must match the
        # tuple returned by analyze_resume
        analyze_button.click(
            analyze_resume,
            inputs=[resume_file, job_description_file],
            outputs=[
                output_message,
                skills_similarity_output,
                qualifications_similarity_output,
                experience_similarity_output,
                sentiment_output,
                resume_summary_output,
                job_description_summary_output,
                resume_skills_output,
                job_description_skills_output,
            ],
        )

    demo.launch()

# Execute the Gradio interface
if __name__ == "__main__":
    gradio_interface()
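# Dependencies used above (for reference): PyPDF2, torch, transformers,
# sentence-transformers, groq, gradio, and docxtpl. In a Hugging Face Space these
# would typically be listed in requirements.txt.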