From d2033768505c9e03b3883d1e9cbd183537da7d94 Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Tue, 11 Jun 2024 01:56:03 +0100
Subject: [PATCH] Update the README

---
 README.md | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/README.md b/README.md
index 6f10518..acadd13 100644
--- a/README.md
+++ b/README.md
@@ -8,6 +8,14 @@ Another Ollama bot for discord, however designed for mesh self-hosting.
 [bot]
 token = "your-bot-token"
 debug_guilds = [0123456789] # omit for global commands
+db_url = "sqlite://:memory:"
+# ^ The database URL. If $DATABASE_URL is set, it overrides this setting.
+# The default in a Docker environment is IN MEMORY (`sqlite://:memory:`),
+# meaning all data is lost when the container stops.
+# The default in a non-Docker environment is `sqlite://default.db`, i.e. SQLite at ./default.db.
+# You can use SQLite or PostgreSQL out of the box.
+# To build jimmy with MySQL/MSSQL/Oracle support, change the `asyncpg` extra
+# to `asyncmy`/`asyncodbc` in the tortoise-orm requirement in requirements.txt.
 
 [ollama]
 order = ["server1", "server2", "fallback"]
@@ -17,14 +25,23 @@ order = ["server1", "server2", "fallback"]
 base_url = "https://hosted.ollama.internal" # default port is 443, because HTTPS
 gpu = true
 vram_gb = 8
+default_model = "llama3:latest" # sets the default model for /ollama chat
 
 [ollama.server2]
 base_url = "http://192.168.1.2:11434"
 gpu = true
 vram_gb = 4 # <8GB will enable "low VRAM mode" in ollama
+default_model = "llama2:latest"
 
 [ollama.fallback]
 base_url = "http://192.168.1.250:11434"
 gpu = false
 vram_gb = 32 # in the case of CPU Ollama, "vram" is actually just regular RAM.
+default_model = "orca-mini:3b"
 ```
+
+## Running
+
+See [the example docker-compose.yml](/docker-compose.yml) for how to run this bot with docker-compose.
+
+Alternatively, you can run the docker image directly: `git.i-am.nexus/nex/sentient-jimmy:master`.
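
For illustration, a PostgreSQL configuration might look like the sketch below. The host, credentials, and database name are placeholders rather than values from this repository; tortoise-orm accepts `postgres://` URLs through its `asyncpg` backend.

```toml
[bot]
token = "your-bot-token"
# Placeholder credentials, for illustration only.
db_url = "postgres://jimmy:changeme@192.168.1.3:5432/jimmy"
```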
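A minimal docker-compose sketch follows. The image tag comes from the README above, but the service name, the in-container config path, and the volume layout are assumptions; the repository's own docker-compose.yml is authoritative.

```yaml
services:
  jimmy:
    image: git.i-am.nexus/nex/sentient-jimmy:master
    restart: unless-stopped
    environment:
      # Overrides db_url from config.toml, per the README section above.
      DATABASE_URL: "sqlite:///data/jimmy.db"
    volumes:
      - ./config.toml:/app/config.toml:ro # the in-container path is an assumption
      - ./data:/data # persist the SQLite database across restarts
```

Without a file-backed $DATABASE_URL and a mounted volume, the in-memory default described in the README means the bot loses all state whenever the container restarts.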