`;return o},
// Bot metadata cache module (IIFE): seeds an in-memory map with static
// bot records bundled into this file, merges in still-fresh entries from
// a localStorage-backed cache, and exposes async lookup helpers that
// batch-fetch unknown bot ids from the server.
i=(function(){
// g: in-memory cache of bot records keyed by stringified bot id,
// pre-seeded with the static definitions below.
let g={"381":{"id":381,"name":"Qwen 2 VL 7B","model_name":"Qwen 2 VL 7B","desc":"Qwen2 VL 7B is a multimodal LLM from the Qwen Team with multimedia capabilities.","desc_short":"
Qwen2 VL 7B is a multimodal LLM from the Qwen Team with multimedia capabilities.
","desc_more":"","link":"qwen-2-vl-7b","provider":"Official","developer":"Qwen","tpm":2,"image":"https://hbcdn01.hotbot.com/avatar/381.jpg","model":165,"sys":"","read_image":1,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Qwen","Unmodified","Image Input","7B Parameters","Multilingual"],"labels":["Official"],"wel":"","asktext":"","short_name":"Qwen 2 VL 7B","set":{"temperature":1}},"789":{"id":789,"name":"Jamba 1.5 Large","model_name":"Jamba 1.5 Large","desc":"Jamba 1.5 Large is part of AI21's new family of open models, offering superior speed, efficiency, and quality. It features a 256K effective context window, the longest among open models, enabling improved performance on tasks like document summarization and analysis. Built on a novel SSM-Transformer architecture, it outperforms larger models on benchmarks while maintaining resource efficiency.","desc_short":"
Jamba 1.5 Large is part of AI21's new family of open models, offering superior speed, efficiency, and quality. It features a 256K effective context window, the longest among open models, enabling improved performance on tasks like document summarization and analysis. Built on a novel SSM-Transformer architecture, it outperforms larger models on benchmarks while maintaining resource efficiency.
","desc_more":"","link":"jamba-1.5-large","provider":"Official","developer":"AI21","tpm":100,"image":"https://hbcdn01.hotbot.com/avatar/789.jpg","model":185,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":145601,"level":2,"ph":"","labels_full":["Official","Premium","Uncensored","Large Context","Unmodified","Mamba","Creative Writing"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"Jamba 1.5 Large","set":{"temperature":1}},"169":{"id":169,"name":"Gemma2 9b","model_name":"Gemma2 9b","desc":"Gemma2 9b is Google's redesigned open model, optimized for outsized performance and unmatched efficiency. Built from the same research and technology used to create the Gemini models, this model provides built-in safety advancements and an expanded parameter size while still being extremely fast.","desc_short":"
Gemma2 9b is Google's redesigned open model, optimized for outsized performance and unmatched efficiency. Built from the same research and technology used to create the Gemini models, this model provides built-in safety advancements and an expanded parameter size while still being extremely fast.
","desc_more":"","link":"gemma2-9b","provider":"Official","developer":"Google","tpm":4,"image":"https://hbcdn01.hotbot.com/avatar/169.jpg","model":37,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Gemma","Google","Unmodified","9B Parameters","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"Gemma2 9b","set":{"temperature":1}},"313":{"id":313,"name":"Mistral 7B Instruct","model_name":"Mistral 7B Instruct","desc":"A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.","desc_short":"
A high-performing, industry-standard 7.3B parameter model, with optimizations for speed and context length.
","desc_more":"","link":"mistral-7b-instruct","provider":"Official","developer":"Mistral AI","tpm":2,"image":"https://hbcdn01.hotbot.com/avatar/313.jpg","model":141,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","Mistral","Unmodified","7B Parameters","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"Mistral 7B Instruct","set":{"temperature":1}},"289":{"id":289,"name":"MythoMax 13B","model_name":"MythoMax 13B","desc":"One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay.","desc_short":"
One of the highest performing and most popular fine-tunes of Llama 2 13B, with rich descriptions and roleplay.
","desc_more":"","link":"mythomax-13b","provider":"Official","developer":"gryphe","tpm":2,"image":"hotbot.png","model":117,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Unmodified","13B Parameters"],"labels":["Official"],"wel":"","asktext":"","short_name":"MythoMax 13B","set":{"temperature":1}},"205":{"id":205,"name":"Meta Llama3.1 8b","model_name":"Meta Llama3.1 8b","desc":"Llama 3.1 is a group of open-source instruction-tuned models from Meta. These multilingual models have a context length of 128K, state-of-the-art tool use, and strong reasoning capabilities. The 8B variant is a light-weight, ultra-fast model that can run anywhere.","desc_short":"
Llama 3.1 is a group of open-source instruction-tuned models from Meta. These multilingual models have a context length of 128K, state-of-the-art tool use, and strong reasoning capabilities. The 8B variant is a light-weight, ultra-fast model that can run anywhere.
","desc_more":"","link":"meta-llama3.1-8b","provider":"Official","developer":"Meta","tpm":2,"image":"https://hbcdn01.hotbot.com/avatar/205.jpg","model":93,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Meta","Llama","Unmodified","8B Parameters","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"Meta Llama3.1 8b","set":{"temperature":1}},"141":{"id":141,"name":"OpenAI GPT-3.5 Turbo 16k","model_name":"OpenAI GPT-3.5 Turbo 16k","desc":"OpenAI GPT-3.5 Turbo is a fast, inexpensive model for simple tasks developed by OpenAI. Capable of understanding and generating natural language or code, it has been optimized for chat, with an expanded context length of 16K.","desc_short":"
OpenAI GPT-3.5 Turbo is a fast, inexpensive model for simple tasks developed by OpenAI. Capable of understanding and generating natural language or code, it has been optimized for chat, with an expanded context length of 16K.
","desc_more":"","link":"openai-gpt-3.5-turbo-16k","provider":"Official","developer":"OpenAI","tpm":70,"image":"https://hbcdn01.hotbot.com/avatar/141.jpg","model":9,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":56260,"level":2,"ph":"","labels_full":["Official","Premium","Large Context","OpenAI","ChatGPT","Unmodified","Coding"],"labels":["Official","Premium"],"wel":"","asktext":"","short_name":"OpenAI GPT-3.5 Turbo 16k","set":{"temperature":1}},"845":{"id":845,"name":"DeepSeek V3 Chat","model_name":"DeepSeek V3 Chat","desc":"DeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions. Pre-trained on nearly 15 trillion tokens, the reported evaluations reveal that the model outperforms other open-source models and rivals leading closed-source models.","desc_short":"
DeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions. Pre-trained on nearly 15 trillion tokens, the reported evaluations reveal that the model outperforms other open-source models and rivals leading closed-source models.
","desc_more":"","link":"deepseek-v3-chat","provider":"Official","developer":"DeepSeek","tpm":5,"image":"https://hbcdn01.hotbot.com/avatar/845.jpg","model":205,"sys":"","read_image":0,"prem":false,"can_at":1,"no_comp":0,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Uncensored","Large Context","DeepSeek","Unmodified","Coding"],"labels":["Official"],"wel":"","asktext":"","short_name":"DeepSeek V3 Chat","set":{"temperature":1}},"1":{"id":1,"name":"HotBot Assistant","model_name":"HotBot Assistant","desc":"HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.","desc_short":"
HotBot Assistant is a comprehensive chat companion that can help you with a number of tasks based on how you talk to it.
","desc_more":"","link":"hotbot-assistant","provider":"Official","developer":"HotBot","tpm":14,"image":"https://hbcdn01.hotbot.com/avatar/1.jpg","model":201,"sys":"","read_image":1,"prem":false,"can_at":0,"no_comp":1,"search":0,"no_search":0,"no_audio":0,"no_attach":0,"wizard":[],"repeat_wizard":0,"per_message":0,"level":0,"ph":"","labels_full":["Official","Unlimited","Image Generation","Web Search","Coding","70B Parameters","Creative Writing"],"labels":["Official"],"wel":"","asktext":"","short_name":"Assistant","set":{"temperature":1}}},
// b: in-flight lookup requests keyed by bot id; concurrent getBots()
// calls that need the same id await one shared request.
// d/n: localStorage handle and the cache key used within it.
b={},d=localStorage,n="hba-botcache",
// l(): load the persisted cache, keeping only entries whose cacheTS is
// newer than 10 minutes ago (6e5 ms); missing or unparseable JSON
// silently yields an empty map.
l=()=>{let g={},b=d[n];try{let l=Date.now()-6e5;b&&$.each(JSON.parse(b),(b,d)=>{if(d.cacheTS>l)g[b]=d})}catch(a){}return g},
// a(bots): merge fetched bot records into the persisted cache, stamping
// each record with cacheTS on first write, then save back to localStorage.
a=g=>{let b=l();for(let a of g){b[a.id]=a;if(!a.cacheTS)a.cacheTS=Date.now()}d[n]=JSON.stringify(b)};
// Fold still-fresh persisted entries into the in-memory cache without
// overwriting the static seed records above.
$.each(l(),(b,d)=>{if(!g[b])g[b]=d});
// e(ids): resolve a single id or an array of ids to a map of
// id -> bot record. Awaits any in-flight request for an uncached id,
// serves cached records directly, and batch-fetches the remaining
// unknown ids with one POST (a response code of 0 is treated as
// success); in-flight markers are cleared afterwards either way.
async function e(d){let n=[],l={};for(let h of(d instanceof Array)?d:[d])if(h=parseInt(h)){if(b[h]&&!g[h])await b[h];if(g[h])l[h]=g[h];else if(n.indexOf(h)<0)n.push(h)}if(n.length){let i=$.post("/lookup-bots.json",{bot_id:n.join(",")});for(let h of n)b[h]=i;let o=await i;if(o.code==0){a(o.data);for(let h of o.data)g[h.id]=h,l[h.id]=h}for(let h of n)delete b[h]}return l}
// Public API: the raw cache map, a batched getter, and a single-bot
// getter that resolves to null for a falsy id.
return{cache:g,getBots:e,getBot:async g=>g?(await e(g))[g]:null}})();$('