Use orca-mini:7b instead of 3b

This commit is contained in:
Nexus 2024-04-18 00:57:46 +01:00
parent 33319feb86
commit 531c0e9d4e
Signed by: nex
GPG key ID: 0FA334385D0B689F

View file

@@ -878,11 +878,11 @@ class Ollama(commands.Cog):
return await ctx.respond("All servers are offline. Please try again later.", ephemeral=True)
client = OllamaClient(CONFIG["ollama"][server]["base_url"])
- if not await client.has_model_named("orca-mini", "3b"):
-     with client.download_model("orca-mini", "3b") as handler:
+ if not await client.has_model_named("orca-mini", "7b"):
+     with client.download_model("orca-mini", "7b") as handler:
async for _ in handler:
self.log.info(
- "Downloading orca-mini:3b on server %r - %s (%.2f%%)", server, handler.status, handler.percent
+ "Downloading orca-mini:7b on server %r - %s (%.2f%%)", server, handler.status, handler.percent
)
if self.lock.locked():
@@ -896,7 +896,7 @@ class Ollama(commands.Cog):
await ctx.respond(embed=embed, ephemeral=True)
last_edit = time.time()
msg = None
- with client.new_chat("orca-mini:3b", messages) as handler:
+ with client.new_chat("orca-mini:7b", messages) as handler:
self.log.info("New chat connection established.")
async for ln in handler:
done = ln.get("done") is True