🏆 LLM4SE Leaderboard

Community-Driven Evaluation of Top Large Language Models (LLMs) in Software Engineering (SE) Tasks

The SWE-Model-Arena is an open-source platform designed to evaluate LLMs through human preference, fostering transparency and collaboration. This platform aims to empower the SE community to assess and compare the performance of leading LLMs in related tasks. For technical details, check out our paper.

{
  "headers": [
    "Rank",
    "Model",
    "Organization",
    "Elo Score",
    "Win Rate",
    "Conversation Efficiency Index",
    "Consistency Score",
    "Bradley-Terry Coefficient",
    "Eigenvector Centrality Value",
    "Newman Modularity Score",
    "PageRank Score"
    ],
  "data": [
    [
      1,
      "Claude 3.5 Sonnet (2024-06-20)",
      "Anthropic",
      1005.97,
      1,
      1,
      null,
      0,
      1,
      null,
      0.07
      ],
    [
      2,
      "Grok 3 Beta",
      "X Ai",
      1003.99,
      1,
      1,
      null,
      0,
      0,
      null,
      0.06
      ],
    [
      3,
      "o3",
      "Openai",
      1003.98,
      1,
      1,
      null,
      0,
      0,
      null,
      0.06
      ],
    [
      4,
      "Llama 3.1 405B (base)",
      "Meta Llama",
      1003.95,
      1,
      1,
      null,
      0,
      0,
      null,
      0.05
      ],
    [
      5,
      "Gemini 2.5 Flash Image Preview (Nano Banana)",
      "Google",
      1002,
      1,
      1,
      null,
      0,
      0,
      null,
      0.04
      ],
    [
      5,
      "Grok 3 Mini Beta",
      "X Ai",
      1002,
      1,
      1,
      null,
      0,
      0,
      null,
      0.04
      ],
    [
      7,
      "o3 Mini",
      "Openai",
      1001.99,
      1,
      1,
      null,
      0,
      0,
      null,
      0.04
      ],
    [
      8,
      "Mistral Large 3 2512",
      "Mistralai",
      1001.98,
      1,
      0.72,
      null,
      0,
      0,
      null,
      0.04
      ],
    [
      9,
      "DeepSeek V3",
      "Deepseek",
      1001.94,
      1,
      1,
      null,
      0,
      0,
      null,
      0.04
      ],
    [
      10,
      "Claude 3 Opus",
      "Anthropic",
      999.99,
      1,
      0.53,
      null,
      0,
      0,
      null,
      0.04
      ],
    [
      11,
      "GPT-4 Turbo",
      "Openai",
      999.98,
      1,
      -0.33,
      null,
      0,
      0,
      null,
      0.03
      ],
    [
      12,
      "GPT-4o-mini",
      "Openai",
      998.02,
      0.33,
      -1,
      null,
      0,
      0,
      0,
      0.03
      ],
    [
      12,
      "Gemma 3 27B",
      "Google",
      998.02,
      0.5,
      0.3,
      null,
      0,
      0,
      0,
      0.03
      ],
    [
      14,
      "Claude 3.5 Haiku (2024-10-22)",
      "Anthropic",
      998.01,
      0.25,
      -1,
      null,
      0,
      0,
      0,
      0.03
      ],
    [
      14,
      "Gemini 2.5 Flash",
      "Google",
      998.01,
      0.33,
      -0.6,
      null,
      0,
      0,
      0,
      0.03
      ],
    [
      14,
      "o4 Mini",
      "Openai",
      998.01,
      0.12,
      0.3,
      null,
      0,
      0,
      0,
      0.03
      ],
    [
      17,
      "GPT-5 Mini",
      "Openai",
      998,
      0,
      -1,
      null,
      0,
      0,
      0,
      0.02
      ],
    [
      17,
      "Gemini 1.5 Pro Experimental",
      "Google",
      998,
      0,
      -1,
      null,
      0,
      0,
      0,
      0.02
      ],
    [
      17,
      "Gemini 2.5 Pro Preview 05-06",
      "Google",
      998,
      0,
      -1,
      null,
      0,
      0,
      0,
      0.02
      ],
    [
      17,
      "R1",
      "Deepseek",
      998,
      0,
      -1,
      null,
      0,
      0,
      0,
      0.02
      ],
    [
      17,
      "o1",
      "Openai",
      998,
      0,
      -1,
      null,
      0,
      0,
      0,
      0.02
      ],
    [
      17,
      "o1-mini",
      "Openai",
      998,
      0,
      0.3,
      null,
      0,
      0,
      0,
      0.02
      ],
    [
      23,
      "Grok 3 Beta",
      "X Ai",
      997.99,
      0,
      0.3,
      null,
      0,
      0,
      0,
      0.02
      ],
    [
      24,
      "GPT-4o",
      "Openai",
      996.03,
      0,
      -0.2,
      null,
      0,
      0,
      0,
      0.02
      ],
    [
      25,
      "Claude 3.7 Sonnet",
      "Anthropic",
      996.01,
      0,
      -0.13,
      null,
      0,
      0,
      0,
      0.02
      ],
    [
      25,
      "GPT-4.1 Mini",
      "Openai",
      996.01,
      0,
      -1,
      null,
      0,
      0,
      0,
      0.02
      ],
    [
      27,
      "Claude 3.7 Sonnet (thinking)",
      "Anthropic",
      996,
      0,
      -0.43,
      null,
      0,
      0,
      0,
      0.02
      ],
    [
      27,
      "Gemini 2.5 Pro",
      "Google",
      996,
      0,
      null,
      1,
      0,
      0,
      0,
      0.02
      ],
    [
      29,
      "gpt-oss-120b",
      "Openai",
      995.98,
      0,
      1,
      1,
      0,
      0,
      0,
      0.02
      ],
    [
      30,
      "GPT-3.5 Turbo",
      "Openai",
      988.14,
      0,
      -0.63,
      0.33,
      0,
      0,
      0,
      0.02
      ]
    ],
  "metadata": null
}

Made with ❤️ for SWE-Model-Arena. If this work is useful to you, please consider citing our vision paper:

@inproceedings{zhao2025se,
title={SE Arena: An Interactive Platform for Evaluating Foundation Models in Software Engineering},
author={Zhao, Zhimin},
booktitle={2025 IEEE/ACM Second International Conference on AI Foundation Models and Software Engineering (Forge)},
pages={78--81},
year={2025},
organization={IEEE}
}