Created February 6, 2025 06:12
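An interactive command-line script for grading student projects against an eight-category rubric. For each category the evaluator picks one of the predefined score levels, and the script prints a per-category breakdown and a total for the student. Note that the category maxima as defined sum to 95 points, so the summary reports the total against that computed maximum rather than a hard-coded 100.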
def get_score(category, max_score, options):
    """Prompt for a score in one rubric category until a valid option is entered."""
    print(f"\n{category} ({max_score} points)")
    for score, description in options.items():
        print(f"{score} - {description}")
    while True:
        try:
            score = int(input("Enter score: "))
            if score in options:
                return score
            print(f"Invalid input. Please enter one of {list(options.keys())}.")
        except ValueError:
            print("Invalid input. Please enter a number.")

def main():
    print("\n=== Student Project Evaluation CLI ===")
    student_name = input("Enter Student Name: ")
    student_id = input("Enter Student ID: ")

    # Rubric: category -> (maximum points, {allowed score: description}).
    categories = {
        "Project Understanding & Clarity": (10, {
            10: "Clearly understands and explains the problem, objective, and approach.",
            8: "Mostly clear, with minor gaps in explanation.",
            6: "Some understanding, but key elements are missing.",
            4: "Lacks clarity and coherence in defining the project.",
            2: "No clear understanding of the project."
        }),
        "Data Handling & Preparation": (10, {
            10: "Properly cleaned, preprocessed, and justified all data choices.",
            8: "Good data preparation with minor issues.",
            6: "Some preprocessing done but lacks key steps.",
            4: "Data handling is incomplete or incorrect.",
            2: "No proper data cleaning or preprocessing."
        }),
        "Methodology & Model Selection": (15, {
            15: "Well-justified model choices, including comparisons and tuning.",
            12: "Good selection but lacks explanation for alternative models.",
            9: "Basic model choice with minimal justification.",
            6: "Poor model selection or incorrect methodology.",
            3: "No logical model selection."
        }),
        "Model Performance & Evaluation": (15, {
            15: "Excellent evaluation with proper metrics, analysis, and insights.",
            12: "Good evaluation but lacks detailed analysis.",
            9: "Basic evaluation with limited metrics or interpretation.",
            6: "Poor performance analysis or incorrect evaluation.",
            3: "No proper performance evaluation."
        }),
        "Implementation & Code Quality": (15, {
            15: "Clean, well-structured, and well-commented code.",
            12: "Good code but could improve readability.",
            9: "Functional but lacks structure and comments.",
            6: "Messy or difficult to understand.",
            3: "Non-functional or copied code."
        }),
        "Interpretation & Business Impact": (10, {
            10: "Clearly explains findings and their real-world impact.",
            8: "Good insights but lacks depth.",
            6: "Some insights but minimal connection to the problem.",
            4: "Weak or unclear interpretations.",
            2: "No meaningful insights provided."
        }),
        "Originality & Effort": (10, {
            10: "Fully original work with exceptional effort.",
            8: "Good effort with minor originality concerns.",
            6: "Some effort but lacks innovation.",
            4: "Minimal effort or partial plagiarism.",
            2: "Copied or AI-generated work."
        }),
        "Presentation & Documentation": (10, {
            10: "Well-structured report and/or presentation with clear explanations.",
            8: "Good documentation with minor improvements needed.",
            6: "Basic presentation but lacks details.",
            4: "Poor documentation or missing key sections.",
            2: "No proper presentation or documentation."
        })
    }

    # Collect a score for each category and accumulate the total.
    total_score = 0
    scores = {}
    for category, (max_score, options) in categories.items():
        score = get_score(category, max_score, options)
        scores[category] = score
        total_score += score

    # The category maxima sum to 95, not 100, so compute the denominator
    # from the rubric instead of hard-coding it.
    max_total = sum(max_score for max_score, _ in categories.values())

    print("\n=== Evaluation Summary ===")
    print(f"Student Name: {student_name}")
    print(f"Student ID: {student_id}")
    print("-----------------------------")
    for category, score in scores.items():
        print(f"{category}: {score} points")
    print("-----------------------------")
    print(f"Total Score: {total_score}/{max_total}")

if __name__ == "__main__":
    main()
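To use the script, save it to a file (for example evaluate.py, a name chosen here for illustration) and run it with python evaluate.py. It prompts for the student's name and ID, then walks through each rubric category in turn; entering a value outside the listed options, or a non-number, re-prompts until a valid score is given.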