"""
Helper functions for analyzing kernelbot submissions.

Usage:
    from analyze_submissions import load_submissions, author_progression, top_contestants
"""
| |
|
| | import pandas as pd |
| | from pathlib import Path |
| |
|
| |
|
def format_score(score, unit='us'):
    """
    Render a raw score (seconds) as a human-readable string with units.

    Args:
        score: Score in seconds; NaN-like values yield 'N/A'.
        unit: 'us' for microseconds, 'ms' for milliseconds, 'auto' to pick a
            scale from the magnitude; any other value prints plain seconds.

    Returns:
        Formatted string with a unit suffix.
    """
    if pd.isna(score):
        return 'N/A'

    if unit == 'auto':
        # Guard clauses from largest scale down: seconds, then ms, then µs.
        if score >= 1:
            return f"{score:.4f} s"
        if score >= 0.001:
            return f"{score * 1_000:.3f} ms"
        return f"{score * 1_000_000:.2f} µs"
    if unit == 'us':
        return f"{score * 1_000_000:.2f} µs"
    if unit == 'ms':
        return f"{score * 1_000:.3f} ms"
    # Fallback: any unrecognized unit is printed as seconds.
    return f"{score:.6f} s"
| |
|
| |
|
def load_submissions(parquet_path: str = None) -> pd.DataFrame:
    """Load deduplicated submissions from a parquet file.

    Args:
        parquet_path: Explicit path to the parquet file; when omitted, falls
            back to ``nvidia_nvfp4_submissions.parquet`` three directories
            above this module.

    Returns:
        DataFrame of submissions.
    """
    default_path = Path(__file__).parent.parent.parent / "nvidia_nvfp4_submissions.parquet"
    return pd.read_parquet(default_path if parquet_path is None else parquet_path)
| |
|
| |
|
def author_progression(df: pd.DataFrame, user_id: str = None, user_name: str = None,
                       problem_name: str = None) -> pd.DataFrame:
    """
    Chronological view of an author's submissions, showing their progression.

    Args:
        df: DataFrame of submissions
        user_id: Filter by user ID (Discord ID)
        user_name: Filter by username (partial match, case-insensitive)
        problem_name: Filter by problem name

    Returns:
        DataFrame sorted by submission_time showing the author's journey
    """
    # Build one boolean mask combining all requested filters.
    mask = pd.Series(True, index=df.index)

    if user_id:
        mask &= df['user_id'] == user_id
    if user_name:
        mask &= df['user_name'].str.contains(user_name, case=False, na=False)
    if problem_name:
        mask &= df['problem_name'] == problem_name

    return df[mask].sort_values('submission_time')
| |
|
| |
|
def top_contestants(df: pd.DataFrame, problem_name: str = None, n: int = 20,
                    passing_only: bool = True) -> pd.DataFrame:
    """
    Rank contestants by their single best (fastest) score.

    Args:
        df: DataFrame of submissions
        problem_name: Filter by problem name (required for meaningful results)
        n: Number of top contestants to return
        passing_only: Only include passing submissions

    Returns:
        DataFrame with top contestants and their best scores
    """
    output_cols = ['user_name', 'user_id', 'score', 'submission_time', 'problem_name']

    pool = df.copy()
    if problem_name:
        pool = pool[pool['problem_name'] == problem_name]
    if passing_only:
        pool = pool[pool['passed'] == True]  # noqa: E712 -- elementwise compare on the column

    # Unscored rows can't be ranked.
    pool = pool.dropna(subset=['score'])
    if pool.empty:
        return pd.DataFrame(columns=output_cols)

    # Keep one row per user: the submission where their score was minimal.
    personal_bests = pool.loc[pool.groupby('user_id')['score'].idxmin()]

    return personal_bests.sort_values('score').head(n)[output_cols]
| |
|
| |
|
def leaderboard_summary(df: pd.DataFrame, score_unit='us') -> pd.DataFrame:
    """
    Get summary statistics for each problem.

    Args:
        df: DataFrame of submissions
        score_unit: 'us' for microseconds, 'ms' for milliseconds, 's' for seconds

    Returns:
        DataFrame with submission counts, unique users, score ranges
    """
    summary = df.groupby('problem_name').agg({
        'submission_id': 'count',
        'user_id': 'nunique',
        'score': ['min', 'median', 'max'],
        'passed': 'sum'
    })

    # Flatten the (column, stat) MultiIndex produced by the multi-stat agg.
    summary.columns = ['submissions', 'unique_users', 'best_score', 'median_score',
                       'worst_score', 'passing_count']

    # (multiplier, decimals) per unit; unknown units (including 's') keep
    # the raw per-second values untouched, matching the original behavior.
    scales = {'us': (1_000_000, 2), 'ms': (1_000, 3)}
    if score_unit in scales:
        multiplier, decimals = scales[score_unit]
        score_cols = ['best_score', 'median_score', 'worst_score']
        summary[score_cols] = (summary[score_cols] * multiplier).round(decimals)

    return summary
| |
|
| |
|
def user_stats(df: pd.DataFrame, user_id: str = None, user_name: str = None) -> pd.DataFrame:
    """
    Per-problem statistics (submission count, best score, passes) for one user.

    Args:
        df: DataFrame of submissions
        user_id: Filter by exact user ID; takes precedence over user_name.
        user_name: Filter by username (partial match, case-insensitive).

    Returns:
        DataFrame indexed by problem_name with num_submissions, best_score,
        and passing_count columns.
    """
    if user_id:
        selected = df[df['user_id'] == user_id]
    elif user_name:
        selected = df[df['user_name'].str.contains(user_name, case=False, na=False)]
    else:
        selected = df

    # Named aggregation yields the final column names directly.
    return selected.groupby('problem_name').agg(
        num_submissions=('submission_id', 'count'),
        best_score=('score', 'min'),
        passing_count=('passed', 'sum'),
    )
| |
|