# kino/performance_analyzer.py
import copy
import os
import random
from typing import Any, Dict, Optional

from huggingface_hub import InferenceClient


class CollegePerformanceAnalyzer:
    def __init__(self):
        """
        Initialize the College Performance Analyzer with secure token management.

        This method handles:
        - Retrieving the Hugging Face token from environment secrets
        - Initializing the Inference Client
        - Setting up performance parameters
        """
        try:
            # Securely retrieve the HF token from environment variables
            hf_token = os.environ.get('HF_TOKEN')
            if not hf_token:
                raise ValueError("No Hugging Face token found. Please set it as a Space secret.")

            # Initialize the Inference Client with the token
            self.client = InferenceClient(
                model="mistralai/Mistral-7B-Instruct-v0.1",
                token=hf_token
            )
        except Exception as e:
            print(f"Inference Client Error: {e}")
            self.client = None

        # Define performance parameters
        self.parameters = self.define_performance_parameters()
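
    # Usage note (added, not part of the original upload): in a Hugging Face
    # Space, HF_TOKEN can be added under Settings -> Variables and secrets;
    # Spaces expose secrets to the app as environment variables, which is what
    # os.environ.get('HF_TOKEN') reads above. For local runs, something like
    # `export HF_TOKEN=hf_...` before launching has the same effect.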

    def define_performance_parameters(self) -> Dict[str, Dict[str, Any]]:
        """
        Define comprehensive college performance parameters with weights.

        Returns a dictionary of parameters, each with:
        - Weight in the overall performance score
        - Full descriptive name
        - A short description of what the parameter measures
        """
        return {
            "SS": {
                "weight": 0.06,
                "full_name": "Student Strength",
                "description": "Total student population and diversity"
            },
            "FSR": {
                "weight": 0.075,
                "full_name": "Faculty-Student Ratio",
                "description": "Quality of academic interaction and support"
            },
            "FQE": {
                "weight": 0.06,
                "full_name": "Faculty Qualification Efficiency",
                "description": "Academic credentials and expertise of faculty"
            },
            "FRU": {
                "weight": 0.06,
                "full_name": "Faculty Research Utility",
                "description": "Research output and impact"
            },
            "OE+MIR": {
                "weight": 0.03,
                "full_name": "Outreach & Industry Engagement",
                "description": "External collaborations and industry connections"
            },
            "GUE": {
                "weight": 0.12,
                "full_name": "Graduate Employment Excellence",
                "description": "Job placement and career success rates"
            },
"GPHD": {
"weight": 0.08,
"full_name": "Graduate PhD Pursuit",
"description": "Higher education and research career progression"
},
"RD": {
"weight": 0.03,
"full_name": "Research Development",
"description": "Research funding and publication quality"
},
"WD": {
"weight": 0.03,
"full_name": "Worldwide Diversity",
"description": "International student and faculty representation"
},
"ESCS": {
"weight": 0.02,
"full_name": "Economic & Social Campus Sustainability",
"description": "Environmental and social responsibility initiatives"
},
"PCS": {
"weight": 0.02,
"full_name": "Peer Campus Satisfaction",
"description": "Student and faculty satisfaction surveys"
},
"PR": {
"weight": 0.10,
"full_name": "Perception Rating",
"description": "Reputation and public perception"
}
}
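
    # Observation (added note, not in the original): the twelve weights above sum
    # to 0.685 rather than 1.0, so the maximum attainable total weighted score in
    # calculate_weighted_metrics() is 68.5. The parameter codes mirror NIRF-style
    # abbreviations, but the full names and weights here are this project's own.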

    def generate_performance_scores(self, seed: Optional[int] = None) -> Dict:
        """
        Generate random performance scores with an optional seed for reproducibility.

        Args:
            seed (int, optional): Random seed for consistent score generation

        Returns:
            Dict with generated performance scores
        """
        if seed is not None:
            random.seed(seed)

        # Deep-copy the parameter definitions so the generated scores do not
        # mutate the shared self.parameters template (a shallow .copy() would
        # reuse the same nested dicts and leak scores between calls).
        parameters = copy.deepcopy(self.parameters)
        for param in parameters:
            parameters[param]["score"] = random.randint(0, 100)
        return parameters
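
    # Example of the generated structure (illustrative): parameters["FSR"] ends
    # up as {"weight": 0.075, "full_name": "Faculty-Student Ratio",
    # "description": "...", "score": <random int in 0-100>}.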

    def calculate_weighted_metrics(self, parameters: Dict) -> Dict:
        """
        Calculate comprehensive weighted performance metrics.

        Computes:
        - Weighted scores
        - Total performance score
        - NIRF-style rank estimate
        - Overall rating
        """
        # Calculate each parameter's weighted score
        for param, values in parameters.items():
            values["weighted_score"] = values["score"] * values["weight"]

        # Compute the total weighted score
        total_weighted_score = sum(
            values["weighted_score"] for values in parameters.values()
        )

        # Heuristic rank estimate: a higher total weighted score yields a
        # better (lower) rank number
        nirf_rank = int((1000 - total_weighted_score) / 10)

        # Map the 0-100 average score onto a 1-5 rating, clamped so that very
        # low averages cannot round down to 0
        average_score = sum(values["score"] for values in parameters.values()) / len(parameters)
        overall_rating = max(1, min(5, round(average_score / 20)))

        return {
            "parameters": parameters,
            "total_weighted_score": total_weighted_score,
            "nirf_rank": nirf_rank,
            "overall_rating": overall_rating
        }
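
    # Worked example (added illustration): with the weights defined above, an
    # FSR score of 80 contributes 80 * 0.075 = 6.0 weighted points. Since the
    # weights sum to 0.685, total_weighted_score ranges from 0 to 68.5, so the
    # heuristic nirf_rank runs from int(1000 / 10) = 100 (all scores 0) down to
    # int((1000 - 68.5) / 10) = 93 (all scores 100).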

    def generate_ai_feedback(self, analysis_results: Dict) -> str:
        """
        Generate AI-powered strategic insights using the Mistral model.

        Provides comprehensive, actionable feedback on college performance.
        Falls back to manually generated feedback if the Inference Client is
        unavailable or the request fails.
        """
        if not self.client:
            return self._generate_manual_feedback(analysis_results)

        # Construct a detailed feedback prompt
        feedback_prompt = self._construct_feedback_prompt(analysis_results)

        try:
            completion = self.client.text_generation(
                prompt=feedback_prompt,
                model="mistralai/Mistral-7B-Instruct-v0.1",
                max_new_tokens=500,
                temperature=0.7,
                top_p=0.9,
                repetition_penalty=1.1
            )
            return completion
        except Exception as e:
            print(f"AI Feedback Generation Error: {e}")
            return self._generate_manual_feedback(analysis_results)

    def _construct_feedback_prompt(self, analysis_results: Dict) -> str:
        """
        Create a structured prompt for AI feedback generation.
        """
        parameters = analysis_results['parameters']
        overall_rating = analysis_results['overall_rating']

        prompt = "Comprehensive College Performance Strategic Analysis:\n\n"
        prompt += "Performance Metrics:\n"
        for param, details in parameters.items():
            prompt += f"{details['full_name']}: {details['score']}/100\n"

        prompt += f"\nOverall Rating: {overall_rating}/5\n\n"
        prompt += "Provide a detailed strategic analysis including:\n"
        prompt += "1. Key institutional strengths\n"
        prompt += "2. Critical improvement areas\n"
        prompt += "3. Actionable strategic recommendations\n"
        prompt += "4. Potential long-term impact on rankings\n"
        return prompt
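
    # Illustrative output (assembled from the format strings above; scores are
    # made up for the example):
    #   Comprehensive College Performance Strategic Analysis:
    #
    #   Performance Metrics:
    #   Student Strength: 74/100
    #   Faculty-Student Ratio: 61/100
    #   ...
    #
    #   Overall Rating: 3/5
    # followed by the four numbered analysis instructions.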

    def _generate_manual_feedback(self, analysis_results: Dict) -> str:
        """
        Fallback method to generate manual strategic feedback without the model.
        """
        parameters = analysis_results['parameters']
        overall_rating = analysis_results['overall_rating']

        feedback = "### Strategic Performance Analysis\n\n"
        feedback += f"**Overall Institutional Rating**: {overall_rating}/5\n\n"

        # Rank parameters by score to surface strengths and weaknesses
        sorted_params = sorted(
            parameters.items(),
            key=lambda x: x[1]['score'],
            reverse=True
        )

        feedback += "#### Institutional Strengths:\n"
        for param, values in sorted_params[:3]:
            feedback += f"- **{values['full_name']}**: Strong performance ({values['score']}/100)\n"

        feedback += "\n#### Areas for Strategic Enhancement:\n"
        for param, values in sorted_params[-3:]:
            feedback += f"- **{values['full_name']}**: Requires focused improvement (Current: {values['score']}/100)\n"

        return feedback
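

# Minimal usage sketch (added example, not part of the original upload). It
# exercises the full pipeline; if HF_TOKEN is unset, __init__ prints an error,
# self.client stays None, and generate_ai_feedback() falls back to the manual
# feedback path, so this runs with or without a token.
if __name__ == "__main__":
    analyzer = CollegePerformanceAnalyzer()
    scores = analyzer.generate_performance_scores(seed=42)  # seeded for reproducibility
    results = analyzer.calculate_weighted_metrics(scores)
    print(f"Total weighted score: {results['total_weighted_score']:.2f}")
    print(f"Estimated NIRF-style rank: {results['nirf_rank']}")
    print(f"Overall rating: {results['overall_rating']}/5\n")
    print(analyzer.generate_ai_feedback(results))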