Infermatic output cleanup. Added dictionary and joke plugins. Updated Readme.

2026-05-07 21:02:49 -05:00
parent f8e8ae9533
commit c263b2c40e
4 changed files with 203 additions and 13 deletions
+13 -13
@@ -9,6 +9,7 @@ import json
import simplematrixbotlib as botlib
from asyncio import Queue
from dotenv import load_dotenv
import re
# Load environment variables from .env file in the parent directory
plugin_dir = os.path.dirname(os.path.abspath(__file__))
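The hunk above shows only the directory lookup; the actual `load_dotenv` call falls outside the hunk. A minimal sketch of how the parent-directory `.env` load typically completes follows; the exact path join is an assumption, and the env var name is taken from the help text at the end of this diff:

```python
import os
from dotenv import load_dotenv

# Directory containing this plugin file, as in the hunk above.
plugin_dir = os.path.dirname(os.path.abspath(__file__))

# Assumed completion: the .env file sits one level above the plugin directory.
load_dotenv(os.path.join(plugin_dir, "..", ".env"))

# API key read later by the plugin; name taken from the __help__ section below.
INFERMATIC_API = os.getenv("INFERMATIC_API")
```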
@@ -65,7 +66,7 @@ async def handle_command(room, message, bot, prefix, config):
try:
# Extract options manually since argparse doesn't handle mixed positional/optional well
temperature = 0.9
max_tokens = 2048
max_tokens = 512
custom_model = None
prompt_parts = []
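Since the handler parses flags by hand (the comment above notes that argparse copes poorly with mixed positional and optional arguments), a rough sketch of what that manual extraction could look like is shown below. The defaults (0.9, 512) and flag names come from this diff and the help text; the function name and the token-by-token loop are assumptions:

```python
def parse_text_args(args):
    """Manually split --temperature/--max-tokens/--use-model from the prompt words."""
    temperature, max_tokens, custom_model = 0.9, 512, None
    prompt_parts = []
    i = 0
    while i < len(args):
        token = args[i]
        if token == "--temperature" and i + 1 < len(args):
            temperature = float(args[i + 1])
            i += 2
        elif token == "--max-tokens" and i + 1 < len(args):
            max_tokens = int(args[i + 1])
            i += 2
        elif token == "--use-model" and i + 1 < len(args):
            custom_model = args[i + 1]
            i += 2
        else:
            prompt_parts.append(token)
            i += 1
    return temperature, max_tokens, custom_model, " ".join(prompt_parts)
```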
@@ -174,10 +175,11 @@ async def list_models(room, bot):
except Exception as e:
await bot.api.send_text_message(room.room_id, f"❌ Unexpected error: {str(e)}")
import re # add at the top of the file
async def generate_text(room, bot, prompt, model, temperature, max_tokens):
"""Generate text using the Infermatic AI API."""
try:
# Send initial processing message
await bot.api.send_text_message(room.room_id, f"📝 Generating text...")
url = f"{INFERMATIC_API_BASE}/chat/completions"
@@ -205,14 +207,13 @@ async def generate_text(room, bot, prompt, model, temperature, max_tokens):
await bot.api.send_text_message(room.room_id, "No response generated.")
return
# Format the output with collapsible sections
output = f"<details><summary><strong>📝 Generated Text (Click to expand)</strong></summary>"
output += f"<strong>Model:</strong> <code>{model}</code><br><br>"
output += f"<strong>Prompt:</strong> {prompt}<br><br>"
output += f"<strong>Response:</strong><br><br>"
output += f"{generated_text}"
output += f"</details>"
# ---- Clean up blank lines that break list rendering ----
# Remove blank lines directly before a list item (numbered or hyphen bullet).
generated_text = re.sub(r'\n\n(\d+\.)', r'\n\1', generated_text)
generated_text = re.sub(r'\n\n(- )', r'\n\1', generated_text)
# Build a pure Markdown message (no HTML)
output = f"**Model:** `{model}`\n\n**Prompt:** {prompt}\n\n**Response:**\n\n{generated_text}"
await bot.api.send_markdown_message(room.room_id, output)
except requests.exceptions.Timeout:
@@ -227,7 +228,6 @@ async def generate_text(room, bot, prompt, model, temperature, max_tokens):
except Exception as e:
await bot.api.send_text_message(room.room_id, f"❌ Error generating text: {str(e)}")
finally:
# Process next queued command
if not command_queue.empty():
next_command = await command_queue.get()
await handle_command(*next_command)
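To make the blank-line cleanup above concrete, here is a small standalone check of the two substitutions; the sample text is invented for illustration:

```python
import re

sample = "Here are some ideas:\n\n1. First idea\n\n2. Second idea\n\n- A bullet"

cleaned = re.sub(r'\n\n(\d+\.)', r'\n\1', sample)
cleaned = re.sub(r'\n\n(- )', r'\n\1', cleaned)

# Blank lines before numbered items and hyphen bullets are collapsed,
# so Matrix clients render them as one continuous list.
print(cleaned)
# Here are some ideas:
# 1. First idea
# 2. Second idea
# - A bullet
```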
@@ -237,9 +237,9 @@ async def generate_text(room, bot, prompt, model, temperature, max_tokens):
# Plugin Metadata
# ---------------------------------------------------------------------------
__version__ = "1.0.0"
__version__ = "1.0.2"
__author__ = "Funguy Bot"
__description__ = "AI text generation via Infermatic API"
__description__ = "AI text generation via Infermatic API (pure Markdown output)"
__help__ = """
<details>
<summary><strong>!text</strong> AI text generation (Infermatic)</summary>
@@ -248,7 +248,7 @@ __help__ = """
<li><code>!text --list-models</code> List available models</li>
<li><code>!text --use-model &lt;model&gt; &lt;prompt&gt;</code> Specific model</li>
<li><code>--temperature &lt;0.0-1.0&gt;</code> Set creativity (default 0.9)</li>
<li><code>--max-tokens &lt;number&gt;</code> Max output length (default 2048)</li>
<li><code>--max-tokens &lt;number&gt;</code> Max output length (default 512)</li>
</ul>
<p>Requires <strong>INFERMATIC_API</strong> env var.</p>
</details>