diff --git a/config.json b/config.json
index c32e02c..ddc0585 100644
--- a/config.json
+++ b/config.json
@@ -1,5 +1,31 @@
-[
+{
+    "matrix":
+    {
+        "server": "https://matrix.org",
+        "username": "@USERNAME:SERVER.TLD",
+        "password": "PASSWORD",
+        "channels":
+        [
+            "#channel1:SERVER.TLD",
+            "#channel2:SERVER.TLD",
+            "#channel3:SERVER.TLD",
+            "!ExAmPleOfApRivAtErOoM:SERVER.TLD"
+        ],
+        "admins":
+        [
+            "admin_nick1",
+            "admin_nick2"
+        ]
+    },
+    "ollama": {
+        "api_base": "http://localhost:11434",
+        "options":
+        {
+            "temperature": 0.8,
+            "top_p": 0.7,
+            "repeat_penalty": 1.2
+        },
 
         "models":
         {
             "llama3": "llama3:8b-instruct-q5_K_M",
@@ -11,40 +37,9 @@
             "dolphin-mistral": "dolphin-mistral:7b-v2.8-q5_K_M",
             "dolphin-llama3": "dolphin-llama3:8b-v2.9-q5_K_M",
             "llama3.1": "llama3.1:8b-instruct-q5_K_M"
-    },
-
-    "default_model": "llama3.1"
-
-    },
-    {
-        "server": "https://matrix.org",
-        "username": "@USERNAME:SERVER.TLD",
-        "password": "PASSWORD",
-
-        "channels":
-        [
-            "#channel1:SERVER.TLD",
-            "#channel2:SERVER.TLD",
-            "#channel3:SERVER.TLD",
-            "!ExAmPleOfApRivAtErOoM:SERVER.TLD"
-        ],
-
-        "personality": "a minimalist AI assistant who provides longer responses when requested",
-        "admins":
-        [
-            "admin_nick1",
-            "admin_nick2"
-        ]
-
-    },
-    {
-        "api_base": "http://localhost:11434",
-        "options":
-        {
-            "temperature": 0.8,
-            "top_p": 0.7,
-            "repeat_penalty": 1.2
-        }
-
+        },
+        "default_model": "llama3.1",
+        "prompt": ["you are ", ". roleplay and speak in the first person and never break character. keep your responses brief and to the point."],
+        "personality": "a minimalist AI assistant who provides longer responses when requested"
     }
-]
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/ollamarama.py b/ollamarama.py
index 150faf4..0c58dab 100644
--- a/ollamarama.py
+++ b/ollamarama.py
@@ -19,8 +21,10 @@ class ollamarama:
             config = json.load(f)
         f.close()
 
-        self.server, self.username, self.password, self.channels, self.default_personality, self.admins = config[1].values()
-        self.api_url = config[2]['api_base'] + "/api/chat"
+        self.server, self.username, self.password, self.channels, self.admins = config['matrix'].values()
+
+        self.api_url = config['ollama']['api_base'] + "/api/chat"
+        self.default_personality = config['ollama']['personality']
         self.personality = self.default_personality
 
         self.client = AsyncClient(self.server, self.username)
@@ -32,14 +34,14 @@ class ollamarama:
         self.messages = {}
 
         #prompt parts
-        self.prompt = ("you are ", ". roleplay and speak in the first person and never break character. keep your responses brief and to the point.")
+        self.prompt = config['ollama']['prompt']
 
-        self.models = config[0]['models']
+        self.models = config['ollama']['models']
         #set model
-        self.default_model = self.models[config[0]['default_model']]
+        self.default_model = self.models[config['ollama']['default_model']]
         self.model = self.default_model
 
-        self.temperature, self.top_p, self.repeat_penalty = config[2]['options'].values()
+        self.temperature, self.top_p, self.repeat_penalty = config['ollama']['options'].values()
         self.defaults = {
             "temperature": self.temperature,
             "top_p": self.top_p,
@@ -182,7 +184,7 @@ class ollamarama:
         with open(self.config_file, "r") as f:
             config = json.load(f)
         f.close()
-        self.models = config[0]['models']
+        self.models = config['ollama']['models']
         if message == ".models":
             current_model = f"Current model: {self.model}\nAvailable models: {', '.join(sorted(list(self.models)))}"
             await self.send_message(room_id, current_model)