"""
|
|
Plugin for generating text using Ollama's Mistral 7B Instruct model and sending it to a Matrix chat room.
|
|
"""
|
|
|
|
import argparse
from asyncio import Queue

import requests
import simplematrixbotlib as botlib

# Queue to store pending commands
command_queue = Queue()

API_URL = "http://localhost:11434/api/generate"
MODEL_NAME = "mistral:7b-instruct"

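# Rough sketch (an assumption based on the Ollama /api/generate documentation,
# not verified against every Ollama version) of the JSON shapes exchanged with
# the endpoint above:
#   request:  {"model": MODEL_NAME, "prompt": "...", "stream": false,
#              "options": {"num_predict": 512, "temperature": 0.7}}
#   response: {"model": "...", "response": "<generated text>", "done": true, ...}
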

async def process_command(room, message, bot, prefix, config):
    """
    Queue and process !text commands sequentially.
    """
    match = botlib.MessageMatch(room, message, bot, prefix)
    if match.prefix() and match.command("text"):
        if command_queue.empty():
            # Nothing is queued: handle this command right away
            await handle_command(room, message, bot, prefix, config)
        else:
            # Another command is pending: queue this one so handle_command
            # can drain it after the current request finishes
            await command_queue.put((room, message, bot, prefix, config))


async def handle_command(room, message, bot, prefix, config):
    """
    Send the prompt to the Ollama API and post the generated text to the room.
    """
    match = botlib.MessageMatch(room, message, bot, prefix)
    if not (match.prefix() and match.command("text")):
        return

    # Parse optional arguments. Note that argparse raises SystemExit (not a
    # regular Exception) when it cannot parse the input, so that case is
    # handled explicitly below.
    parser = argparse.ArgumentParser(description='Generate text using Ollama API')
    parser.add_argument('--max_tokens', type=int, default=512, help='Maximum tokens to generate')
    parser.add_argument('--temperature', type=float, default=0.7, help='Temperature for generation')
    parser.add_argument('prompt', nargs='+', help='Prompt for the model')

    try:
        args = parser.parse_args(message.body.split()[1:])  # Skip command itself
        prompt = ' '.join(args.prompt).strip()

        if not prompt:
            await bot.api.send_text_message(room.room_id, "Usage: !text <your prompt here>")
            return

        # Ollama's /api/generate endpoint takes sampling parameters inside an
        # "options" object; "num_predict" is Ollama's name for the maximum
        # number of tokens to generate
        payload = {
            "model": MODEL_NAME,
            "prompt": prompt,
            "stream": False,
            "options": {
                "num_predict": args.max_tokens,
                "temperature": args.temperature,
            },
        }

        # requests is synchronous, so this call blocks the event loop while
        # Ollama generates the response
        response = requests.post(API_URL, json=payload, timeout=60)
        response.raise_for_status()
        r = response.json()

        generated_text = r.get("response", "").strip()
        if not generated_text:
            generated_text = "(No response from model)"

        await bot.api.send_text_message(room.room_id, generated_text)

    except (argparse.ArgumentError, SystemExit):
        # argparse exits via SystemExit on bad input instead of raising a
        # normal exception, so it is caught here alongside ArgumentError
        await bot.api.send_text_message(
            room.room_id,
            "Could not parse arguments. Usage: !text [--max_tokens N] [--temperature T] <prompt>"
        )
    except requests.exceptions.RequestException as e:
        await bot.api.send_text_message(room.room_id, f"Error connecting to Ollama API: {e}")
    except Exception as e:
        await bot.api.send_text_message(room.room_id, f"Unexpected error: {e}")
    finally:
        # Process the next command from the queue, if any
        if not command_queue.empty():
            next_command = await command_queue.get()
            await handle_command(*next_command)


def print_help():
    """
    Generate help text for the !text command.
    """
    return """
    <p>Generate text using Ollama's Mistral 7B Instruct model</p>

    <p>Usage:</p>
    <ul>
    <li>!text &lt;prompt&gt; - Basic prompt for the model</li>
    <li>Optional arguments:</li>
    <ul>
    <li>--max_tokens MAX_TOKENS - Maximum tokens to generate (default 512)</li>
    <li>--temperature TEMPERATURE - Sampling temperature (default 0.7)</li>
    </ul>
    </ul>
    """
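

# Hedged usage sketch: one way this plugin might be wired into a
# simplematrixbotlib bot. The homeserver URL, credentials, and the `config`
# argument (which this plugin never reads) are placeholders, so treat this
# as an illustration rather than a ready-to-use entry point.
if __name__ == "__main__":
    PREFIX = "!"
    creds = botlib.Creds("https://example.org", "botuser", "password")  # placeholder credentials
    example_bot = botlib.Bot(creds)

    @example_bot.listener.on_message_event
    async def on_message(room, message):
        # Forward every message event to the plugin; it filters for "!text" itself
        await process_command(room, message, example_bot, PREFIX, None)

    example_bot.run()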