import gradio as gr
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
import pandas as pd
import os
import json
import tempfile
import shutil
import zipfile
from huggingface_hub import snapshot_download

# Constants for PhysicalCodeBench
TITLE = """
<div style="text-align: center; max-width: 900px; margin: 0 auto;">
    <div>
        <h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">
            PhysicalCodeBench Leaderboard
        </h1>
        <h3 style="margin-top: 0; margin-bottom: 10px; font-weight: 500;">
            Evaluating LLMs on Physics-based Simulation Code Generation
        </h3>
    </div>
</div>
"""

INTRODUCTION_TEXT = """
PhysicalCodeBench evaluates the ability of Large Language Models (LLMs) to generate code for physics-based simulations.
The benchmark consists of user instructions that describe physical scenarios to be simulated, reference code implementations,
and the resulting simulation videos generated with the [Genesis](https://github.com/Genesis-Embodied-AI/Genesis) physics engine.
This leaderboard showcases model performance on the PhysicalCodeBench-50 dataset, measuring both text-based execution success
and the visual quality of the generated simulations.
"""

ABOUT_TEXT = """
## About PhysicalCodeBench

PhysicalCodeBench evaluates an LLM's ability to:
- Understand natural language descriptions of physical scenarios
- Generate executable code that correctly implements the physics simulation
- Produce visually accurate and physically plausible results

The benchmark covers a variety of physical phenomena, including:
- Rigid body dynamics (collisions, rolling, bouncing, etc.)
- Fluid and particle simulations
- Soft body physics
- Controlled environments (robotic arms, drones, etc.)
- Chain reactions and complex interactions

## Evaluation Metrics

PhysicalCodeBench uses two main evaluation dimensions:

1. **Text Score (50 points)**: Evaluates code execution success
   - Code runs without errors (25 points)
   - The expected output files are generated (10 points)
   - Output files meet the required specifications (15 points)
2. **Visual Score (50 points)**: Evaluates simulation quality
   - CLIP Score: measures text-video alignment (25 points)
   - Motion Smoothness: evaluates physics simulation quality (25 points)

The total score is the sum of the Text and Visual scores (maximum 100 points).
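For example, a model whose generated code earns a Text Score of 16.0 and a Visual Score of 18.262 receives a total score of 34.262.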
| """ | |

SUBMISSION_TEXT = """
## How to Submit Your Model Results

1. Fork the [PhysicalCodeBench repository](https://github.com/Sealical/PhysicalCodeBench)
2. Generate code for all 50 tasks in the benchmark using your model
3. Run the evaluation pipeline with your generated code
4. Create a submission folder with the following structure:
```
submission/
├── model_info.json               # Contains model details (name, size, etc.)
└── evaluation_results/           # Directory containing all result files
    └── PhysCodeEval_results.json # Main evaluation results file
```
5. Zip your submission folder and upload it below along with your model details
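
An illustrative `model_info.json` is shown below; `model_name`, `model_type`, and `organization` are the fields checked on upload, and the remaining fields are optional examples rather than a fixed schema:
```json
{
    "model_name": "YourModel-32B",
    "model_type": "Open Source",
    "organization": "Your Organization",
    "model_size": "32B"
}
```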

Your submission will be verified and added to the leaderboard once approved.
"""

CITATION_TEXT = """
@article{PhysicalCodeBench2025,
  title={PhysicalCodeBench: Evaluating LLMs on Physics-based Simulation Code Generation},
  author={Your Name and Co-authors},
  journal={arXiv preprint arXiv:XXXX.XXXXX},
  year={2025}
}
"""

# Custom CSS for the interface
custom_css = """
.markdown-text {
    font-size: 16px !important;
    text-align: left !important;
}
.tab-button {
    font-size: 16px !important;
}
"""

# Define column structure for the leaderboard
class PhysCodeColumn:
    def __init__(self, name, type, displayed_by_default=True, never_hidden=False, hidden=False):
        self.name = name
        self.type = type
        self.displayed_by_default = displayed_by_default
        self.never_hidden = never_hidden
        self.hidden = hidden


# Define the columns for our leaderboard
COLUMNS = [
    PhysCodeColumn("rank", "number", True, True, False),
    PhysCodeColumn("model", "str", True, True, False),
    PhysCodeColumn("model_type", "str", True, False, False),
    PhysCodeColumn("organization", "str", True, False, False),
    PhysCodeColumn("text_score", "number", True, False, False),
    PhysCodeColumn("visual_score", "number", True, False, False),
    PhysCodeColumn("total_score", "number", True, False, False),
    PhysCodeColumn("clip_score", "number", False, False, False),
    PhysCodeColumn("motion_smooth_score", "number", False, False, False),
    PhysCodeColumn("execution_success", "number", False, False, False),
    PhysCodeColumn("file_generation", "number", False, False, False),
    PhysCodeColumn("file_quality", "number", False, False, False),
    PhysCodeColumn("submission_date", "date", False, False, False),
]


# Enums for model metadata
class ModelType:
    Proprietary = "Proprietary"
    OpenSource = "Open Source"
    CloseSource = "Closed Source"
    API = "API"
    Unknown = "Unknown"

    @staticmethod
    def to_str(model_type):
        return model_type

# Load sample data (replace with your actual data loading logic)
def get_leaderboard_df():
    # Sample data based on your README
    data = [
        {
            "rank": 1,
            "model": "GPT4o",
            "model_type": ModelType.CloseSource,
            "organization": "OpenAI",
            "text_score": 16.0,
            "visual_score": 18.262,
            "total_score": 34.262,
            "clip_score": 10.2,
            "motion_smooth_score": 8.062,
            "execution_success": 10.0,
            "file_generation": 3.0,
            "file_quality": 3.0,
            "submission_date": "2025-01-15"
        },
        {
            "rank": 2,
            "model": "Gemini-2.0-flash",
            "model_type": ModelType.CloseSource,
            "organization": "Google",
            "text_score": 15.0,
            "visual_score": 16.963,
            "total_score": 31.963,
            "clip_score": 9.5,
            "motion_smooth_score": 7.463,
            "execution_success": 9.0,
            "file_generation": 3.0,
            "file_quality": 3.0,
            "submission_date": "2025-01-20"
        },
        {
            "rank": 3,
            "model": "DS-R1",
            "model_type": ModelType.OpenSource,
            "organization": "DeepSeek",
            "text_score": 14.0,
            "visual_score": 15.815,
            "total_score": 29.815,
            "clip_score": 8.9,
            "motion_smooth_score": 6.915,
            "execution_success": 8.5,
            "file_generation": 3.0,
            "file_quality": 2.5,
            "submission_date": "2025-01-25"
        },
        {
            "rank": 4,
            "model": "DeepSeek-R1-Distill-Qwen-32B",
            "model_type": ModelType.OpenSource,
            "organization": "DeepSeek",
            "text_score": 12.2,
            "visual_score": 15.82,
            "total_score": 28.02,
            "clip_score": 8.8,
            "motion_smooth_score": 7.02,
            "execution_success": 7.2,
            "file_generation": 2.5,
            "file_quality": 2.5,
            "submission_date": "2025-01-28"
        },
        {
            "rank": 5,
            "model": "QwQ-32B",
            "model_type": ModelType.OpenSource,
            "organization": "QwQ Team",
            "text_score": 7.1,
            "visual_score": 8.964,
            "total_score": 16.064,
            "clip_score": 4.964,
            "motion_smooth_score": 4.0,
            "execution_success": 4.1,
            "file_generation": 1.5,
            "file_quality": 1.5,
            "submission_date": "2025-02-05"
        },
        {
            "rank": 6,
            "model": "Qwen-2.5-32B",
            "model_type": ModelType.OpenSource,
            "organization": "Alibaba",
            "text_score": 0.7,
            "visual_score": 1.126,
            "total_score": 1.826,
            "clip_score": 0.626,
            "motion_smooth_score": 0.5,
            "execution_success": 0.5,
            "file_generation": 0.1,
            "file_quality": 0.1,
            "submission_date": "2025-02-10"
        },
    ]
    return pd.DataFrame(data)

# Function to load submissions from JSON file
def load_submissions_from_json(json_path):
    if os.path.exists(json_path):
        with open(json_path, 'r') as f:
            data = json.load(f)
        return pd.DataFrame(data)
    return None
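

# Optional: pull leaderboard results from a Hugging Face dataset repo instead of the bundled
# sample data. This is a minimal sketch that reuses load_submissions_from_json; the repo id
# and file name below are placeholders (assumptions), not an official PhysicalCodeBench repo.
def load_results_from_hub(repo_id="your-org/physicalcodebench-results", filename="leaderboard.json"):
    try:
        # snapshot_download (imported above) downloads the dataset repo and returns its local path
        local_dir = snapshot_download(repo_id=repo_id, repo_type="dataset")
        df = load_submissions_from_json(os.path.join(local_dir, filename))
        if df is not None:
            return df
    except Exception as e:
        print(f"Could not load results from the Hub: {e}")
    # Fall back to the built-in sample data
    return get_leaderboard_df()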

# Initialize the leaderboard
def init_leaderboard(dataframe):
    if dataframe is None or dataframe.empty:
        raise ValueError("Leaderboard DataFrame is empty or None.")
    return Leaderboard(
        value=dataframe,
        datatype=[c.type for c in COLUMNS],
        select_columns=SelectColumns(
            default_selection=[c.name for c in COLUMNS if c.displayed_by_default],
            cant_deselect=[c.name for c in COLUMNS if c.never_hidden],
            label="Select Columns to Display:",
        ),
        search_columns=["model", "organization"],
        hide_columns=[c.name for c in COLUMNS if c.hidden],
        filter_columns=[
            ColumnFilter("model_type", type="checkboxgroup", label="Model types"),
            ColumnFilter("organization", type="checkboxgroup", label="Organizations"),
        ],
        interactive=False,
    )

# Function to handle ZIP file upload and extraction
def process_zip_submission(zip_file):
    if zip_file is None:
        return "No file uploaded. Please upload a ZIP file containing your submission."

    # gr.File may pass a plain file path or a tempfile wrapper depending on the Gradio version
    zip_path = zip_file if isinstance(zip_file, str) else zip_file.name

    # Create temp directory for extraction
    temp_dir = tempfile.mkdtemp()
    try:
        # Extract the zip file
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            zip_ref.extractall(temp_dir)

        # Check for required files; the results file may sit at the archive root or
        # inside evaluation_results/, as described in the submission instructions
        model_info_path = os.path.join(temp_dir, "model_info.json")
        results_json_path = os.path.join(temp_dir, "PhysCodeEval_results.json")
        if not os.path.exists(results_json_path):
            results_json_path = os.path.join(temp_dir, "evaluation_results", "PhysCodeEval_results.json")

        if not os.path.exists(model_info_path):
            return "Error: model_info.json not found in the ZIP file."
        if not os.path.exists(results_json_path):
            return "Error: PhysCodeEval_results.json not found in the ZIP file."

        # Load model info
        with open(model_info_path, 'r') as f:
            model_info = json.load(f)

        # Check for required model info fields
        required_fields = ["model_name", "model_type", "organization"]
        missing_fields = [field for field in required_fields if field not in model_info]
        if missing_fields:
            return f"Error: Missing required fields in model_info.json: {', '.join(missing_fields)}"

        # TODO: Process the submission files (this would involve your validation logic)
        return f"Successfully processed submission for {model_info['model_name']} by {model_info['organization']}. Your submission will be reviewed and added to the leaderboard once approved."
    except zipfile.BadZipFile:
        return "Error: Invalid ZIP file."
    except Exception as e:
        return f"Error processing submission: {str(e)}"
    finally:
        # Clean up
        shutil.rmtree(temp_dir)

# Submission form handling
def process_submission(model_name, model_type, organization, team_name, email, submission_link):
    # Check for required fields
    if not model_name:
        return "Error: Model name is required."
    if not model_type:
        return "Error: Model type is required."
    if not email:
        return "Error: Contact email is required."

    # This would be implemented to handle actual submission processing
    return f"Thank you for submitting {model_name} from {organization or team_name}! Your submission will be reviewed and added to the leaderboard once verified. We will contact you at {email} if we need additional information."

# Main application
def create_demo():
    # Load the leaderboard data
    leaderboard_df = get_leaderboard_df()

    # Create the Gradio interface
    demo = gr.Blocks(css=custom_css)
    with demo:
        gr.HTML(TITLE)
        gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

        with gr.Tabs() as tabs:
            with gr.TabItem("Leaderboard", id=0):
                leaderboard = init_leaderboard(leaderboard_df)

            with gr.TabItem("Visualizations", id=1):
                gr.Markdown("## Performance Comparisons")
                with gr.Row():
                    with gr.Column():
                        gr.Markdown("### Text vs. Visual Scores")
                        # Add a visualization component here (e.g., scatter plot)
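                        # A minimal sketch of such a scatter plot, assuming gr.ScatterPlot is
                        # available in the installed Gradio version; the column names come from
                        # the sample data defined in get_leaderboard_df above.
                        gr.ScatterPlot(
                            value=leaderboard_df,
                            x="text_score",
                            y="visual_score",
                            color="model_type",
                            title="Text vs. Visual Scores",
                        )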
                    with gr.Column():
                        gr.Markdown("### Score Breakdown by Task Type")
                        # Add a visualization component here (e.g., bar chart)
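                        # A minimal bar-chart sketch, assuming gr.BarPlot is available. Per-task-type
                        # scores are not in the sample data, so total_score per model is plotted as a
                        # stand-in until task-level results are wired in.
                        gr.BarPlot(
                            value=leaderboard_df,
                            x="model",
                            y="total_score",
                            title="Total Score by Model",
                        )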
                with gr.Row():
                    model_selector = gr.Dropdown(
                        choices=leaderboard_df["model"].tolist(),
                        label="Select Model for Detailed Analysis",
                        multiselect=False,
                    )

            with gr.TabItem("About", id=2):
                gr.Markdown(ABOUT_TEXT, elem_classes="markdown-text")

            with gr.TabItem("Submit", id=3):
                gr.Markdown(SUBMISSION_TEXT, elem_classes="markdown-text")
                gr.Markdown("### Submission Details")
                with gr.Row():
                    zip_file_input = gr.File(label="Upload submission ZIP file*")
                with gr.Row():
                    with gr.Column():
                        model_name_input = gr.Textbox(label="Model Name*")
                        model_type_input = gr.Dropdown(
                            choices=["Open Source", "Closed Source", "API", "Proprietary"],
                            label="Model Type*",
                            multiselect=False,
                        )
                        organization_input = gr.Textbox(label="Organization (if applicable)")
                    with gr.Column():
                        team_name_input = gr.Textbox(label="Team Name (if applicable)")
                        email_input = gr.Textbox(label="Contact Email*")
                        submission_link_input = gr.Textbox(label="GitHub Pull Request URL")

                submit_button = gr.Button("Submit")
                submission_result = gr.Markdown()

                # Combined submission function that processes both ZIP and form data
                def combined_submission(zip_file, model_name, model_type, organization, team_name, email, submission_link):
                    if zip_file is None:
                        return "Error: Please upload a ZIP file containing your submission."
                    if not model_name or not model_type or not email:
                        return "Error: Model name, model type, and email are required fields."

                    # Process ZIP file
                    zip_result = process_zip_submission(zip_file)
                    if zip_result.startswith("Error:"):
                        return zip_result

                    # Process form data
                    return f"Thank you for submitting {model_name} from {organization or team_name}! Your submission ZIP has been processed successfully. We will contact you at {email} if we need additional information."

                submit_button.click(
                    combined_submission,
                    [zip_file_input, model_name_input, model_type_input, organization_input, team_name_input, email_input, submission_link_input],
                    submission_result,
                )

        with gr.Row():
            with gr.Accordion("Citation", open=False):
                citation_button = gr.Textbox(
                    value=CITATION_TEXT,
                    label="Citation",
                    lines=8,
                    elem_id="citation-button",
                    show_copy_button=True,
                )

    return demo

# Launch the application
if __name__ == "__main__":
    demo = create_demo()
    demo.launch()