Add server info command
All checks were successful
Build and Publish / build_and_publish (push) Successful in 50s
All checks were successful
Build and Publish / build_and_publish (push) Successful in 50s
This commit is contained in:
parent
c04e73dff9
commit
954d01bca5
1 changed file with 41 additions and 8 deletions
|
@ -63,7 +63,15 @@ class Chat(commands.Cog):
|
|||
self.server_locks[server.name] = asyncio.Lock()
|
||||
self.log = logging.getLogger(__name__)
|
||||
|
||||
@commands.slash_command()
|
||||
ollama_group = discord.SlashCommandGroup(
|
||||
name="ollama",
|
||||
description="Commands related to ollama.",
|
||||
guild_only=True,
|
||||
max_concurrency=commands.MaxConcurrency(1, per=commands.BucketType.user, wait=False),
|
||||
cooldown=commands.CooldownMapping(commands.Cooldown(1, 10), commands.BucketType.user)
|
||||
)
|
||||
|
||||
@ollama_group.command()
|
||||
async def status(self, ctx: discord.ApplicationContext):
|
||||
"""Checks the status on all servers."""
|
||||
await ctx.defer()
|
||||
|
@ -115,13 +123,38 @@ class Chat(commands.Cog):
|
|||
)
|
||||
await ctx.edit(embed=embed)
|
||||
|
||||
# Slash-command group that namespaces every ollama-related command.
# Guild-only; per-user limits: one command running at a time (no queueing)
# and a cooldown of one invocation every 10 seconds.
ollama_group = discord.SlashCommandGroup(
    name="ollama",
    description="Commands related to ollama.",
    guild_only=True,
    cooldown=commands.CooldownMapping(commands.Cooldown(1, 10), commands.BucketType.user),
    max_concurrency=commands.MaxConcurrency(1, per=commands.BucketType.user, wait=False),
)
|
||||
@ollama_group.command(name="server-info")
async def get_server_info(
    self,
    ctx: discord.ApplicationContext,
    server: typing.Annotated[
        str,
        discord.Option(
            discord.SlashCommandOptionType.string,
            description="The server to use.",
            autocomplete=_ServerOptionAutocomplete,
            default=get_servers()[0].name
        )
    ]
):
    """Gets information on a given server"""
    # Responding may take a while (an online-check round trip), so defer first.
    await ctx.defer()
    target = get_server(server)
    online = await target.is_online()
    # Map booleans to check-mark / cross-mark emoji for display.
    icon = {
        True: "\N{white heavy check mark}",
        False: "\N{cross mark}",
    }
    # GPU-backed servers report VRAM; CPU-only servers report plain RAM.
    memory_label = "VRAM" if target.gpu else "RAM"
    info_lines = [
        f"Name: {target.name!r}",
        f"Base URL: {target.base_url!r}",
        f"GPU Enabled: {icon[target.gpu]}",
        f"{memory_label}: {target.vram_gb:,} GB",
        f"Default Model: {target.default_model!r}",
        f"Is Online: {icon[online]}",
    ]
    # Render the summary inside a markdown code fence.
    return await ctx.respond("```md\n" + "\n".join(info_lines) + "```")
|
||||
|
||||
@ollama_group.command(name="chat")
|
||||
async def start_ollama_chat(
|
||||
|
|
Loading…
Reference in a new issue