diff --git a/fastchat/model/model_registry.py b/fastchat/model/model_registry.py
index 2eed9649e..7f2e2705d 100644
--- a/fastchat/model/model_registry.py
+++ b/fastchat/model/model_registry.py
@@ -65,7 +65,7 @@ def get_model_info(name: str) -> ModelInfo:
register_model_info(
[
- "claude-3-5-sonnet-20240620",
+ "claude-3-5-sonnet-20240620", "claude-3-5-haiku-20241022", "claude-3-5-sonnet-20241022"
],
"Claude 3.5",
"https://www.anthropic.com/news/claude-3-5-sonnet",
@@ -83,6 +83,8 @@ def get_model_info(name: str) -> ModelInfo:
"llama-3.1-405b-instruct",
"llama-3.1-70b-instruct",
"llama-3.1-8b-instruct",
+ "llama-3.3-70b-instruct",
+
],
"Llama 3.1",
"https://llama.meta.com/",
@@ -102,6 +104,8 @@ def get_model_info(name: str) -> ModelInfo:
"gemini-1.5-pro-api-0514",
"gemini-1.5-flash-api-0514",
"gemini-advanced-0514",
+ "gemini-1.5-flash",
+ "gemini-1.5-pro"
],
"Gemini",
"https://deepmind.google/technologies/gemini/",
@@ -331,6 +335,8 @@ def get_model_info(name: str) -> ModelInfo:
"mistral-7b-instruct-v0.2",
"mistral-7b-instruct",
"pixtral-12b-2409",
+ "mistral-7B-Instruct-v0.3",
+ "mistral-Nemo-Instruct-2407"
],
"Mixtral of experts",
"https://mistral.ai/news/mixtral-of-experts/",
@@ -506,12 +512,13 @@ def get_model_info(name: str) -> ModelInfo:
)
register_model_info(
- ["deepseek-llm-67b-chat"],
+ ["deepseek-llm-67b-chat", "DeepSeek-R1-Distill-Qwen-32B", "DeepSeek-R1-Distill-Qwen-1.5B"],
"DeepSeek LLM",
"https://huggingface.co/deepseek-ai/deepseek-llm-67b-chat",
"An advanced language model by DeepSeek",
)
+
register_model_info(
["stripedhyena-nous-7b"],
"StripedHyena-Nous",
@@ -520,7 +527,7 @@ def get_model_info(name: str) -> ModelInfo:
)
register_model_info(
- ["nous-hermes-2-mixtral-8x7b-dpo"],
+ ["nous-hermes-2-mixtral-8x7b-dpo", "hermes-3-Llama-3.1-8B"],
"Nous-Hermes-2-Mixtral-8x7B-DPO",
"https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
"Nous Hermes finetuned from Mixtral 8x7B",
@@ -852,7 +859,7 @@ def get_model_info(name: str) -> ModelInfo:
)
register_model_info(
- ["Meta-Llama-3-8B-Instruct", "Meta-Llama-3-70B-Instruct"],
+ ["Llama-3-8b-instruct", "Llama-3-70b-instruct"],
"llama-3",
"https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct",
"Meta developed and released the Meta Llama 3 family of large language models (LLMs), a collection of pretrained and instruction tuned generative text models in 8 and 70B sizes.",
diff --git a/fastchat/serve/gradio_block_arena_anony.py b/fastchat/serve/gradio_block_arena_anony.py
index 625c69c44..ea4fcd643 100644
--- a/fastchat/serve/gradio_block_arena_anony.py
+++ b/fastchat/serve/gradio_block_arena_anony.py
@@ -79,21 +79,21 @@ def vote_last_response(states, vote_type, model_selectors, request: gr.Request):
get_remote_logger().log(data)
gr.Info(
- "🎉 Thanks for voting! Your vote shapes the leaderboard, please vote RESPONSIBLY."
+ "🎉 Aitäh hääletamast! Sinu valikute põhjal moodustub mudelite edetabel. Palun tee oma valik vastutustundlikult."
)
if ":" not in model_selectors[0]:
for i in range(5):
names = (
- "### Model A: " + states[0].model_name,
- "### Model B: " + states[1].model_name,
+ "### Mudel A: " + states[0].model_name,
+ "### Mudel B: " + states[1].model_name,
)
# yield names + ("",) + (disable_btn,) * 4
yield names + (disable_text,) + (disable_btn,) * 5
time.sleep(0.1)
else:
names = (
- "### Model A: " + states[0].model_name,
- "### Model B: " + states[1].model_name,
+ "### Mudel A: " + states[0].model_name,
+ "### Mudel B: " + states[1].model_name,
)
# yield names + ("",) + (disable_btn,) * 4
yield names + (disable_text,) + (disable_btn,) * 5
@@ -187,13 +187,13 @@ def share_click(state0, state1, model_selector0, model_selector1, request: gr.Re
SAMPLING_BOOST_MODELS = []
# outage models won't be sampled.
-OUTAGE_MODELS = []
+OUTAGE_MODELS = ["DeepSeek-R1-Distill-Qwen-32B", "DeepSeek-R1-Distill-Qwen-1.5B"]
def get_sample_weight(model, outage_models, sampling_weights, sampling_boost_models=[]):
if model in outage_models:
return 0
- weight = sampling_weights.get(model, 0)
+ weight = sampling_weights.get(model, 1)
if model in sampling_boost_models:
weight *= 5
return weight
@@ -260,6 +260,7 @@ def get_battle_pair(
rival_model = rival_models[rival_idx]
swap = np.random.randint(2)
+ logger.info(f"Chosen model: {chosen_model}. Rival model: {rival_model}")
if swap == 0:
return chosen_model, rival_model
else:
@@ -439,47 +440,41 @@ def bot_response_multi(
def build_side_by_side_ui_anony(models):
- notice_markdown = f"""
-# ⚔️ Chatbot Arena (formerly LMSYS): Free AI Chat to Compare & Test Best AI Chatbots
-[Blog](https://blog.lmarena.ai/blog/2023/arena/) | [GitHub](https://github.com/lm-sys/FastChat) | [Paper](https://arxiv.org/abs/2403.04132) | [Dataset](https://github.com/lm-sys/FastChat/blob/main/docs/dataset_release.md) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/6GXcFg3TH8) | [Kaggle Competition](https://www.kaggle.com/competitions/lmsys-chatbot-arena)
-
-{SURVEY_LINK}
-
-## 📣 News
-- Chatbot Arena now supports images in beta. Check it out [here](https://lmarena.ai/?vision).
-
-## 📜 How It Works
-- **Blind Test**: Ask any question to two anonymous AI chatbots (ChatGPT, Gemini, Claude, Llama, and more).
-- **Vote for the Best**: Choose the best response. You can keep chatting until you find a winner.
-- **Play Fair**: If AI identity reveals, your vote won't count.
-
-## 🏆 Chatbot Arena LLM [Leaderboard](https://lmarena.ai/leaderboard)
-- Backed by over **1,000,000+** community votes, our platform ranks the best LLM and AI chatbots. Explore the top AI models on our LLM [leaderboard](https://lmarena.ai/leaderboard)!
-
-## 👇 Chat now!
-"""
-
states = [gr.State() for _ in range(num_sides)]
model_selectors = [None] * num_sides
chatbots = [None] * num_sides
- gr.Markdown(notice_markdown, elem_id="notice_markdown")
+ gr.HTML(
+ """
+
+        <div id="hero_text">
+            <h1>🇪🇪 Keelemudelite edetabel 🇪🇪</h1>
+            <h2>Aita valida parimat eestikeelset keelemudelit!</h2>
+            <ol>
+                <li>Esita oma küsimus. Sinu küsimusele vastavad kaks anonüümset keelemudelit.</li>
+                <li>Vali kahest vastusest parim. Kui sa kohe valikut ei oska langetada, võid vestlust jätkata kuni oled otsuseni jõudnud.</li>
+                <li>Sinu valikute põhjal koostame mudelite edetabeli. Palun tee oma otsus vastutustundlikult.</li>
+            </ol>
+        </div>
+
+ """,
+ elem_id="hero_container",
+ )
with gr.Group(elem_id="share-region-anony"):
with gr.Accordion(
- f"🔍 Expand to see the descriptions of {len(models)} models", open=False
+ f"🔍 Kliki siia, et näha võrdluses olevaid mudeleid",
+ open=False,
+ elem_id="models_accordion",
):
model_description_md = get_model_description_md(models)
gr.Markdown(model_description_md, elem_id="model_description_markdown")
with gr.Row():
for i in range(num_sides):
- label = "Model A" if i == 0 else "Model B"
+ label = "Mudel A" if i == 0 else "Mudel B"
with gr.Column():
chatbots[i] = gr.Chatbot(
label=label,
- elem_id="chatbot",
+ elem_classes=f"chatbot chatbot_{i}",
height=650,
- show_copy_button=True,
latex_delimiters=[
{"left": "$", "right": "$", "display": False},
{"left": "$$", "right": "$$", "display": True},
@@ -497,32 +492,58 @@ def build_side_by_side_ui_anony(models):
with gr.Row():
slow_warning = gr.Markdown("")
- with gr.Row():
- leftvote_btn = gr.Button(
- value="👈 A is better", visible=False, interactive=False
- )
- rightvote_btn = gr.Button(
- value="👉 B is better", visible=False, interactive=False
- )
- tie_btn = gr.Button(value="🤝 Tie", visible=False, interactive=False)
- bothbad_btn = gr.Button(
- value="👎 Both are bad", visible=False, interactive=False
- )
+ with gr.Group(elem_id="fixed_footer"):
+ with gr.Row(elem_id="selection_buttons_row"):
+ leftvote_btn = gr.Button(
+ value="Mudel A on parem",
+ elem_classes="voting_button",
+ visible=False,
+ interactive=False,
+ )
+ tie_btn = gr.Button(
+ value="🤝 Viik",
+ elem_classes="voting_button",
+ visible=False,
+ interactive=False,
+ )
+ bothbad_btn = gr.Button(
+ value="👎 Mõlemad on halvad",
+ elem_classes="voting_button",
+ visible=False,
+ interactive=False,
+ )
+ rightvote_btn = gr.Button(
+ value="Mudel B on parem",
+ elem_classes="voting_button",
+ visible=False,
+ interactive=False,
+ )
- with gr.Row():
- textbox = gr.Textbox(
- show_label=False,
- placeholder="👉 Enter your prompt and press ENTER",
- elem_id="input_box",
- )
- send_btn = gr.Button(value="Send", variant="primary", scale=0)
+ with gr.Row(elem_id="input_row"):
+ textbox = gr.Textbox(
+ show_label=False,
+ autofocus=True,
+ placeholder="👉 Kirjuta siia enda küsimus ja vajuta ENTER",
+ elem_id="input_box",
+ )
+ send_btn = gr.Button(
+ value="Saada", variant="primary", scale=0, elem_id="send_button"
+ )
- with gr.Row() as button_row:
- clear_btn = gr.Button(value="🎲 New Round", interactive=False)
- regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
- share_btn = gr.Button(value="📷 Share")
+ with gr.Row() as button_row:
+ clear_btn = gr.Button(
+ value="🎲 Uus vestlus", elem_classes="control_button", interactive=False
+ )
+ share_btn = gr.Button(
+ value="📷 Jaga", elem_classes="row-middle-button control_button"
+ )
+ regenerate_btn = gr.Button(
+ value="🔄 Genereeri vastus uuesti",
+ elem_classes="control_button",
+ interactive=False,
+ )
- with gr.Accordion("Parameters", open=False, visible=False) as parameter_row:
+ with gr.Accordion("Parameetrid", open=False, visible=False) as parameter_row:
temperature = gr.Slider(
minimum=0.0,
maximum=1.0,
@@ -548,7 +569,7 @@ def build_side_by_side_ui_anony(models):
label="Max output tokens",
)
- gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
+ # gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
# Register listeners
btn_list = [
@@ -643,7 +664,7 @@ def build_side_by_side_ui_anony(models):
send_btn.click(
add_text,
states + model_selectors + [textbox],
- states + chatbots + [textbox] + btn_list,
+ states + chatbots + [textbox] + btn_list + [slow_warning],
).then(
bot_response_multi,
states + [temperature, top_p, max_output_tokens],
diff --git a/fastchat/serve/gradio_web_server.py b/fastchat/serve/gradio_web_server.py
index 814022eb2..73d449b67 100644
--- a/fastchat/serve/gradio_web_server.py
+++ b/fastchat/serve/gradio_web_server.py
@@ -54,12 +54,14 @@
disable_btn = gr.Button(interactive=False)
invisible_btn = gr.Button(interactive=False, visible=False)
enable_text = gr.Textbox(
- interactive=True, visible=True, placeholder="👉 Enter your prompt and press ENTER"
+ interactive=True,
+ visible=True,
+ placeholder="👉 Kirjuta siia enda küsimus ja vajuta ENTER",
)
disable_text = gr.Textbox(
interactive=False,
visible=True,
- placeholder='Press "🎲 New Round" to start over👇 (Note: Your vote shapes the leaderboard, please vote RESPONSIBLY!)',
+ placeholder='Kliki "🎲 Uus vestlus", et uut vestlust alustada.',
)
controller_url = None
@@ -67,31 +69,12 @@
use_remote_storage = False
acknowledgment_md = """
-### Terms of Service
-
-Users are required to agree to the following terms before using the service:
-
-The service is a research preview. It only provides limited safety measures and may generate offensive content.
-It must not be used for any illegal, harmful, violent, racist, or sexual purposes.
-Please do not upload any private information.
-The service collects user dialogue data, including both text and images, and reserves the right to distribute it under a Creative Commons Attribution (CC-BY) or a similar license.
-
-#### Please report any bug or issue to our [Discord](https://discord.gg/6GXcFg3TH8)/arena-feedback.
-
-### Acknowledgment
-We thank [UC Berkeley SkyLab](https://sky.cs.berkeley.edu/), [Kaggle](https://www.kaggle.com/), [MBZUAI](https://mbzuai.ac.ae/), [a16z](https://www.a16z.com/), [Together AI](https://www.together.ai/), [Hyperbolic](https://hyperbolic.xyz/), [RunPod](https://runpod.io), [Anyscale](https://www.anyscale.com/), [HuggingFace](https://huggingface.co/) for their generous [sponsorship](https://lmsys.org/donations/).
-
-
+Kasutajad on kohustatud nõustuma järgmiste kasutustingimustega:
+
+See platvorm on loodud eelistusandmete ja juhiste kogumiseks parema eestikeelse keelemudeli loomise eesmärgil. Võrdluses olevatele keelemudelitele rakendatakse piiratud modereerimist, mistõttu võivad mudelid genereerida solvavat sisu.
+Seda platvormi ei tohi kasutada ebaseadusliku, kedagi kahjustava, vägivaldse, rassistliku või seksuaalse sisuga teksti genereerimiseks.
+Palun ära sisesta platvormile isiklikku informatsiooni.
+Platvorm kogub kasutajate dialoogiandmeid ning jätab endale õiguse neid andmeid levitada Creative Commons Attribution (CC-BY) või sarnase litsentsi alusel.
"""
# JSON file format of API-based models:
@@ -643,10 +626,252 @@ def bot_response(
block_css = """
+
+
.prose {
font-size: 105% !important;
}
+.tabs {
+ margin-bottom: 128px;
+ margin-top: -32px;
+}
+
+#input_row {
+ gap: 0;
+}
+
+#input_box, #input_row {
+ background-color: #ffffff70;
+}
+
+#input_box textarea {
+ background-color: white;
+ font-size: 16px;
+}
+
+.chatbot {
+ box-shadow: none;
+}
+
+.chatbot_0 {
+ border-left: 1px solid #e5e5e5 !important;
+}
+
+.chatbot_1 {
+ border-right: 1px solid #e5e5e5 !important;
+}
+
+.voting_button {
+ background: white;
+ color: gray;
+ border-top: 1px solid #AAA;
+ border-right: 1px solid #AAA;
+ border-bottom: 1px solid #AAA;
+}
+
+.voting_button:hover {
+ background: #DDD;
+}
+
+.voting_button:first-child {
+ border-left: 1px solid #AAA;
+ border-top-left-radius: 6px;
+}
+
+.voting_button:last-child {
+ border-top-right-radius: 6px;
+}
+
+#selection_buttons_row {
+ gap: 0;
+}
+
+#hero_text {
+ background-color: #e0f0ff;
+ text-align: center;
+ padding: 32px 24px 56px;
+}
+
+#hero_text h1 {
+ font-size: 34px;
+ padding-bottom: 24px;
+}
+
+#hero_text ol {
+ font-size: 18px;
+}
+
+#hero_text ol li {
+ padding: 2px 0;
+}
+
+#models_accordion {
+ border: 1px solid #e5e5e5 !important;
+ border-bottom: none !important;
+ border-radius: 6px;
+}
+
+
+.contributor_logos_top {
+ margin-top: 4px;
+ float: right;
+ display: inline-flex;
+}
+
+.contributor_logos_bottom {
+ display: none;
+}
+
+.contributor_logo {
+ height: 28px;
+ margin-left: 12px;
+}
+
+#fixed_footer {
+ position: fixed;
+ bottom: 0px;
+ left: 0px;
+ width: calc(100% - 256px);
+ z-index: 25;
+ margin: 0 128px;
+ border-bottom-left-radius: 0;
+ border-bottom-right-radius: 0;
+}
+
+
+@media screen and (max-width: 1024px) {
+ #fixed_footer {
+ width: 100%;
+ margin: 0;
+ }
+}
+
+
+
+@media screen and (min-width: 640px) {
+ .row-middle-button {
+ margin: 0 12px;
+ }
+}
+
+@media screen and (max-width: 640px) {
+
+ .contributor_logos_top {
+ display: none;
+ }
+
+ .contributor_logos_bottom {
+ margin-top: 24px;
+ display: flex;
+ justify-content: space-around;
+ }
+
+ .chatbot {
+ height: 450px !important;
+ }
+
+
+ .message-row.bubble {
+ margin: var(--spacing-xl) var(--spacing-xl) var(--spacing-md) !important;
+ }
+
+ #fixed_footer {
+ width: 100%;
+ margin: 0;
+ border-radius: 0;
+ }
+
+ .control_button:nth-child(1) {
+ order: 1;
+ }
+ .control_button:nth-child(2) {
+ order: 3;
+ }
+ .control_button:nth-child(3) {
+ order: 2;
+ }
+
+ #send_button {
+ min-width: unset;
+ padding: 0 16px;
+ }
+
+ #input_box textarea {
+ padding: 8px;
+ height: 56px !important;
+ }
+
+ #fixed_footer {
+ position: unset;
+ }
+
+ #input_row {
+ position: fixed;
+ bottom: 0;
+ left: 0;
+ width: 100%;
+ z-index: 25;
+ }
+
+ #chat_tab {
+ padding: 0;
+ }
+
+ #hero_container {
+ border-radius: 0;
+ margin-bottom: -32px;
+ }
+
+ #hero_text h2 {
+ font-size: 20px;
+ }
+
+ #hero_text h1 {
+ font-size: 30px;
+ }
+
+ #hero_text ol {
+ font-size: 16px;
+ }
+
+ #hero_text {
+ padding: 2px 24px 24px;
+ }
+
+ #selection_buttons_row {
+ position: fixed;
+ bottom: 84px;
+ left: 0;
+ width: 100%;
+ z-index: 25;
+ background: white;
+ }
+
+ .voting_button {
+ min-width: 50%;
+ max-width: 50%;
+ }
+ .voting_button:nth-child(1) {
+ order: 1;
+ }
+ .voting_button:nth-child(2) {
+ order: 3;
+ }
+ .voting_button:nth-child(3) {
+ order: 4;
+ }
+ .voting_button:nth-child(4) {
+ order: 2;
+ }
+
+ .tabs {
+ margin-bottom: 172px;
+ }
+}
+
+
+
#arena_leaderboard_dataframe table {
font-size: 105%;
}
@@ -842,40 +1067,28 @@ def get_model_description_md(models):
return model_description_md
+def build_terms():
+ gr.Markdown(acknowledgment_md, elem_id="terms_markdown")
+
+
def build_about():
about_markdown = """
-# About Us
-Chatbot Arena ([lmarena.ai](https://lmarena.ai)) is an open-source platform for evaluating AI through human preference, developed by researchers at UC Berkeley [SkyLab](https://sky.cs.berkeley.edu/) and [LMSYS](https://lmsys.org). We open-source the [FastChat](https://github.com/lm-sys/FastChat) project at GitHub and release open datasets. We always welcome contributions from the community. If you're interested in getting involved, we'd love to hear from you!
-
-## Open-source contributors
-- Leads: [Wei-Lin Chiang](https://infwinston.github.io/), [Anastasios Angelopoulos](https://people.eecs.berkeley.edu/~angelopoulos/)
-- Contributors: [Lianmin Zheng](https://lmzheng.net/), [Ying Sheng](https://sites.google.com/view/yingsheng/home), [Lisa Dunlap](https://www.lisabdunlap.com/), [Christopher Chou](https://www.linkedin.com/in/chrisychou), [Tianle Li](https://codingwithtim.github.io/), [Evan Frick](https://efrick2002.github.io/), [Dacheng Li](https://dachengli1.github.io/), [Siyuan Zhuang](https://www.linkedin.com/in/siyuanzhuang)
-- Advisors: [Ion Stoica](http://people.eecs.berkeley.edu/~istoica/), [Joseph E. Gonzalez](https://people.eecs.berkeley.edu/~jegonzal/), [Hao Zhang](https://cseweb.ucsd.edu/~haozhang/), [Trevor Darrell](https://people.eecs.berkeley.edu/~trevor/)
-
-## Learn more
-- Chatbot Arena [paper](https://arxiv.org/abs/2403.04132), [launch blog](https://blog.lmarena.ai/blog/2023/arena/), [dataset](https://github.com/lm-sys/FastChat/blob/main/docs/dataset_release.md), [policy](https://blog.lmarena.ai/blog/2024/policy/)
-- LMSYS-Chat-1M dataset [paper](https://arxiv.org/abs/2309.11998), LLM Judge [paper](https://arxiv.org/abs/2306.05685)
-
-## Contact Us
-- Follow our [X](https://x.com/lmsysorg), [Discord](https://discord.gg/6GXcFg3TH8) or email us at `lmarena.ai@gmail.com`
-- File issues on [GitHub](https://github.com/lm-sys/FastChat)
-- Download our datasets and models on [HuggingFace](https://huggingface.co/lmsys)
-
-## Acknowledgment
-We thank [SkyPilot](https://github.com/skypilot-org/skypilot) and [Gradio](https://github.com/gradio-app/gradio) team for their system support.
-We also thank [UC Berkeley SkyLab](https://sky.cs.berkeley.edu/), [Kaggle](https://www.kaggle.com/), [MBZUAI](https://mbzuai.ac.ae/), [a16z](https://www.a16z.com/), [Together AI](https://www.together.ai/), [Hyperbolic](https://hyperbolic.xyz/), [RunPod](https://runpod.io), [Anyscale](https://www.anyscale.com/), [HuggingFace](https://huggingface.co/) for their generous sponsorship. Learn more about partnership [here](https://lmsys.org/donations/).
-
-
+# Meist
+Keelemudelite edetabel ([vestle.tartunlp.ai](https://vestle.tartunlp.ai)) on avatud lähtekoodiga platvorm keelemudelite hindamiseks kasutaja eelistuste kaudu. Platvormi lähtekoodi autorid on teadlased California Ülikoolist Berkeleys ([koduleht](https://sky.cs.berkeley.edu/)) ja mittetulundusorganisatsioonist [LMSYS](https://lmsys.org). [Originaalkoodi](https://github.com/lm-sys/FastChat) kohandas eestikeelseks [TartuNLP](https://tartunlp.ai).
+
+
+## Originaalautorid
+- tiimijuhid: [Wei-Lin Chiang](https://infwinston.github.io/), [Anastasios Angelopoulos](https://people.eecs.berkeley.edu/~angelopoulos/)
+- panustajad: [Lianmin Zheng](https://lmzheng.net/), [Ying Sheng](https://sites.google.com/view/yingsheng/home), [Lisa Dunlap](https://www.lisabdunlap.com/), [Christopher Chou](https://www.linkedin.com/in/chrisychou), [Tianle Li](https://codingwithtim.github.io/), [Evan Frick](https://efrick2002.github.io/), [Dacheng Li](https://dachengli1.github.io/), [Siyuan Zhuang](https://www.linkedin.com/in/siyuanzhuang)
+- nõustajad: [Ion Stoica](http://people.eecs.berkeley.edu/~istoica/), [Joseph E. Gonzalez](https://people.eecs.berkeley.edu/~jegonzal/), [Hao Zhang](https://cseweb.ucsd.edu/~haozhang/), [Trevor Darrell](https://people.eecs.berkeley.edu/~trevor/)
+
+## Loe rohkem (materjalid inglise keeles)
+- Chatbot Arena [artikkel](https://arxiv.org/abs/2403.04132), [blogipostitus](https://blog.lmarena.ai/blog/2023/arena/), [andmed](https://github.com/lm-sys/FastChat/blob/main/docs/dataset_release.md), [tingimused](https://blog.lmarena.ai/blog/2024/policy/)
+- LMSYS-Chat-1M andmestik [artikkel](https://arxiv.org/abs/2309.11998), LLM Judge [artikkel](https://arxiv.org/abs/2306.05685)
+
+## Kontakt
+- TBA
+
"""
gr.Markdown(about_markdown, elem_id="about_markdown")
@@ -971,8 +1184,8 @@ def build_single_model_ui(models, add_promotion_links=False):
label="Max output tokens",
)
- if add_promotion_links:
- gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
+ # if add_promotion_links:
+ # gr.Markdown(acknowledgment_md, elem_id="ack_markdown")
# Register listeners
btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
diff --git a/fastchat/serve/gradio_web_server_multi.py b/fastchat/serve/gradio_web_server_multi.py
index 7f9426445..c57cb6a52 100644
--- a/fastchat/serve/gradio_web_server_multi.py
+++ b/fastchat/serve/gradio_web_server_multi.py
@@ -34,6 +34,7 @@
block_css,
build_single_model_ui,
build_about,
+ build_terms,
get_model_list,
load_demo_single,
get_ip,
@@ -80,6 +81,19 @@ def build_visualizer():
gr.HTML(frame)
+def build_logos(position="top"):
+ gr.HTML(
+ f"""
+
+"""
+ )
+
+
def load_demo(context: Context, request: gr.Request):
ip = get_ip(request)
logger.info(f"load_demo. ip: {ip}. params: {request.query_params}")
@@ -105,11 +119,11 @@ def load_demo(context: Context, request: gr.Request):
vision_arena=False,
)
- context.vision_models, context.all_vision_models = get_model_list(
- args.controller_url,
- args.register_api_endpoint_file,
- vision_arena=True,
- )
+ # context.vision_models, context.all_vision_models = get_model_list(
+ # args.controller_url,
+ # args.register_api_endpoint_file,
+ # vision_arena=True,
+ # )
# Text models
if args.vision_arena:
@@ -121,19 +135,19 @@ def load_demo(context: Context, request: gr.Request):
direct_chat_updates = load_demo_single(context, request.query_params)
else:
- direct_chat_updates = load_demo_single(context, request.query_params)
+ # direct_chat_updates = load_demo_single(context, request.query_params)
side_by_side_anony_updates = load_demo_side_by_side_anony(
context.all_text_models, request.query_params
)
- side_by_side_named_updates = load_demo_side_by_side_named(
- context.text_models, request.query_params
- )
+ # side_by_side_named_updates = load_demo_side_by_side_named(
+ # context.text_models, request.query_params
+ # )
tabs_list = (
[gr.Tabs(selected=inner_selected)]
+ side_by_side_anony_updates
- + side_by_side_named_updates
- + direct_chat_updates
+ # + side_by_side_named_updates
+ # + direct_chat_updates
)
return tabs_list
@@ -149,6 +163,10 @@ def build_demo(
head_js = """
+
"""
if args.ga_id is not None:
head_js += f"""
@@ -162,14 +180,32 @@ def build_demo(
window.__gradio_mode__ = "app";
"""
- text_size = gr.themes.sizes.text_lg
+
+ theme = gr.themes.Monochrome(
+ primary_hue="amber",
+ secondary_hue="neutral",
+ radius_size="sm",
+ font=[
+ gr.themes.GoogleFont("Quicksand"),
+ "ui-sans-serif",
+ "system-ui",
+ "sans-serif",
+ ],
+ ).set(
+ button_primary_background_fill="*primary_500",
+ button_primary_background_fill_hover="*primary_600",
+ button_secondary_background_fill="*secondary_500",
+ button_secondary_background_fill_hover="*secondary_600",
+ )
with gr.Blocks(
- title="Chatbot Arena (formerly LMSYS): Free AI Chat to Compare & Test Best AI Chatbots",
- theme=gr.themes.Default(text_size=text_size),
+ title="Keelemudelite edetabel",
+ theme=theme,
css=block_css,
head=head_js,
) as demo:
- with gr.Tabs() as inner_tabs:
+ build_logos()
+
+ with gr.Tabs(elem_classes="tabs") as inner_tabs:
if args.vision_arena:
with gr.Tab("⚔️ Arena (battle)", id=0) as arena_tab:
arena_tab.select(None, None, None, js=load_js)
@@ -192,33 +228,33 @@ def build_demo(
)
else:
- with gr.Tab("⚔️ Arena (battle)", id=0) as arena_tab:
+ with gr.Tab("🇪🇪 Vestle", id=0, elem_id="chat_tab") as arena_tab:
arena_tab.select(None, None, None, js=load_js)
side_by_side_anony_list = build_side_by_side_ui_anony(
context.all_text_models
)
- with gr.Tab("⚔️ Arena (side-by-side)", id=1) as side_by_side_tab:
- side_by_side_tab.select(None, None, None, js=alert_js)
- side_by_side_named_list = build_side_by_side_ui_named(
- context.text_models
- )
+ # with gr.Tab("⚔️ Arena (side-by-side)", id=1) as side_by_side_tab:
+ # side_by_side_tab.select(None, None, None, js=alert_js)
+ # side_by_side_named_list = build_side_by_side_ui_named(
+ # context.text_models
+ # )
- with gr.Tab("💬 Direct Chat", id=2) as direct_tab:
- direct_tab.select(None, None, None, js=alert_js)
- single_model_list = build_single_model_ui(
- context.text_models, add_promotion_links=True
- )
+ # with gr.Tab("💬 Direct Chat", id=2) as direct_tab:
+ # direct_tab.select(None, None, None, js=alert_js)
+ # single_model_list = build_single_model_ui(
+ # context.text_models, add_promotion_links=True
+ # )
demo_tabs = (
[inner_tabs]
+ side_by_side_anony_list
- + side_by_side_named_list
- + single_model_list
+ # + side_by_side_named_list
+ # + single_model_list
)
if elo_results_file:
- with gr.Tab("🏆 Leaderboard", id=3):
+ with gr.Tab("🏆 Tulemused", id=3):
build_leaderboard_tab(
elo_results_file,
leaderboard_table_file,
@@ -229,9 +265,14 @@ def build_demo(
with gr.Tab("🔍 Arena Visualizer", id=5):
build_visualizer()
- with gr.Tab("ℹ️ About Us", id=4):
+ with gr.Tab("ℹ️ Meist", id=4, elem_classes="tab-button"):
build_about()
+ with gr.Tab("📜 Kasutajatingimused", id=6):
+ build_terms()
+
+ build_logos("bottom")
+
context_state = gr.State(context)
if args.model_list_mode not in ["once", "reload"]: