From 6f86fe679ff3f60289675a1c53a74418d2dc959b Mon Sep 17 00:00:00 2001
From: Hash Borgir
Date: Sun, 26 Apr 2026 02:20:23 -0500
Subject: [PATCH] add infermatic-text and whois plugins for AI text generation and WHOIS lookups

---
 README.md                   |  65 +++++++++-
 funguy.conf                 |   2 +-
 funguy.py                   |   2 +-
 plugins/help.py             |  45 ++++++-
 plugins/infermatic-text.py  | 233 ++++++++++++++++++++++++++++++++++++
 plugins/sd_text.py          |  95 ---------------
 plugins/stable-diffusion.py |   2 +-
 plugins/youtube-preview.py  | 187 -----------------------------
 plugins/youtube-search.py   |   2 +-
 requirements.txt            |   2 +
 10 files changed, 345 insertions(+), 290 deletions(-)
 create mode 100644 plugins/infermatic-text.py
 delete mode 100644 plugins/sd_text.py
 delete mode 100644 plugins/youtube-preview.py

diff --git a/README.md b/README.md
index 23bc3aa..a2d66ef 100644
--- a/README.md
+++ b/README.md
@@ -205,6 +205,41 @@ Data Returned:
 Requires DNSDUMPSTER_KEY environment variable in .env file
 ```
 
+### πŸ” WHOIS Lookup
+
+**🌐 !whois <domain/ip>**
+Perform comprehensive WHOIS lookups for domains and IP addresses.
+
+**Features:**
+- Domain validation and IP address recognition
+- Registrar information and WHOIS server details
+- Registration, update, and expiration dates
+- Domain status and name server information
+- Organization and geographic contact details
+- Formatted HTML output with clear sections
+- Comprehensive error handling for invalid queries
+
+**Usage Examples:**
+```bash
+!whois example.com
+!whois google.com
+!whois 8.8.8.8
+!whois 1.1.1.1
+```
+
+**Output includes:**
+- Domain/IP query information
+- Registrar and WHOIS server
+- Important dates (creation, update, expiration)
+- Domain status codes
+- Name servers (up to 5, with count if more)
+- Contact information (organization, country, state, city)
+
+**Error Handling:**
+- Validates domain/IP format before querying
+- Provides clear error messages for failed lookups
+- Handles rate limiting and WHOIS server unavailability
+
 ## ExploitDB Plugin
 
 A security plugin that searches Exploit-DB for vulnerabilities and exploits directly from Matrix.
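The README section above and the help text later in this patch document `!whois`, and `python-whois` is added to `requirements.txt` at the end, but no `plugins/whois.py` appears in the diffstat. A minimal sketch of what such a handler could look like, assuming the `python-whois` package; every name and formatting choice below is illustrative, not the author's code:

```python
# Illustrative sketch only: plugins/whois.py is not included in this patch.
# Assumes the python-whois package that requirements.txt adds below.
import ipaddress
import re

import whois  # python-whois: whois.whois() returns a dict-like WhoisEntry

# Conservative hostname check: dot-separated labels plus an alphabetic TLD.
DOMAIN_RE = re.compile(
    r"^(?=.{1,253}$)(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,63}$"
)

def classify_query(query: str) -> str:
    """The 'domain validation and IP address recognition' step described above."""
    try:
        ipaddress.ip_address(query)  # accepts both IPv4 and IPv6 literals
        return "ip"
    except ValueError:
        return "domain" if DOMAIN_RE.match(query) else "invalid"

def format_whois(query: str) -> str:
    """Collect the fields listed under 'Output includes' into one reply."""
    w = whois.whois(query)  # available fields vary by TLD and registry
    lines = [f"🌐 WHOIS: {query}"]
    for label, key in [
        ("Registrar", "registrar"),
        ("WHOIS Server", "whois_server"),
        ("Created", "creation_date"),
        ("Updated", "updated_date"),
        ("Expires", "expiration_date"),
        ("Name Servers", "name_servers"),
    ]:
        value = w.get(key)
        if isinstance(value, list):
            value = ", ".join(str(v) for v in value[:5])  # cap at 5, as documented
        if value:
            lines.append(f"{label}: {value}")
    return "\n".join(lines)
```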
@@ -368,9 +403,33 @@ Generates images using self-hosted Stable Diffusion with customizable parameters
 - `--sampler` - Sampler name (default: DPM++ SDE)
 
 **πŸ“„ !text [prompt] [options]**
-Generates text using Ollama's Mistral 7B Instruct model:
-- `--max_tokens` - Maximum tokens to generate (default: 512)
-- `--temperature` - Sampling temperature (default: 0.7)
+Generates text using the Infermatic AI API with multiple model support:
+
+**Main Commands:**
+- `!text <prompt>` - Generate text using the default model from INFERMATIC_MODEL
+- `!text --list-models` - List all available models from Infermatic AI
+- `!text --use-model <model_name>` - Use a specific model instead of the default
+
+**Parameters:**
+- `--temperature <value>` - Set generation temperature (0.0-1.0, default: 0.9)
+- `--max-tokens <value>` - Set maximum tokens to generate (default: 2048)
+
+**Configuration:**
+- Requires `INFERMATIC_API` environment variable in `.env` file (your API key)
+- Requires `INFERMATIC_MODEL` environment variable in `.env` file (default: Sao10K-L3.1-70B-Hanami-x1)
+
+**Examples:**
+```bash
+!text write a python function to calculate fibonacci numbers
+!text --use-model llama-v3-8b-instruct explain quantum computing simply
+!text --temperature 0.7 --max-tokens 500 write a haiku about artificial intelligence
+!text --list-models
+```
+
+**Model Management:**
+- Use `--list-models` to see available models with their capabilities
+- Different models support various context lengths and specializations
+- Costs and token limits vary by model
 
 ### Media & Search Commands
diff --git a/funguy.conf b/funguy.conf
index 9afb5c1..dcf2536 100644
--- a/funguy.conf
+++ b/funguy.conf
@@ -14,4 +14,4 @@ config_file = "funguy.conf"
 [plugins.disabled]
 "!uFhErnfpYhhlauJsNK:matrix.org" = [ "youtube-preview", "ai", "proxy",]
 "!vYcfWXpPvxeQvhlFdV:matrix.org" = []
-"!NXdVjDXPxXowPkrJJY:matrix.org" = [ "karma",]
+"!NXdVjDXPxXowPkrJJY:matrix.org" = [ "karma"]
diff --git a/funguy.py b/funguy.py
index 37842a3..0384f90 100755
--- a/funguy.py
+++ b/funguy.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 """
 Funguy Bot Class
 
diff --git a/plugins/help.py b/plugins/help.py
index 24fff76..db5cccd 100644
--- a/plugins/help.py
+++ b/plugins/help.py
@@ -77,6 +77,23 @@ async def handle_command(room, message, bot, prefix, config):

 Fetches the current Bitcoin price in USD from bitcointicker.co API. Shows real-time BTC/USD price with proper formatting. Includes error handling for API timeouts and data parsing issues.
 
+🌐 !whois <domain/ip>
+Perform comprehensive WHOIS lookups for domains and IP addresses. Retrieves registrar information, registration dates, name servers, and contact details from WHOIS databases.
+
+Usage:
+β€’ !whois <domain> - Query domain registration information
+β€’ !whois <ip> - Query IP address allocation details
+
+Examples:
+β€’ !whois example.com
+β€’ !whois google.com
+β€’ !whois 8.8.8.8
+β€’ !whois 1.1.1.1
+
+Output includes: Domain/IP information, registrar, WHOIS server, creation/expiration dates, name servers, and contact details.
+
 πŸ” !shodan [command] [query]
 
 Shodan.io integration for security reconnaissance and threat intelligence.
 
 Commands:
 
@@ -290,7 +307,33 @@ Search Exploit-DB for security vulnerabilities and exploits. Returns detailed in
 πŸ“„ !text [prompt]
 
-Generates text using Ollama's Mistral 7B Instruct model. Options: --max_tokens, --temperature. Uses queuing system for sequential processing.
+Generates text using the Infermatic AI API. Supports multiple models, configurable parameters, and model listing. Uses queuing system for sequential processing.
+
+Usage:
+β€’ !text <prompt> - Generate text using the default model
+β€’ !text --list-models - List all available models from Infermatic AI
+β€’ !text --use-model <model_name> <prompt> - Use a specific model instead of the default
+β€’ !text --temperature <value> <prompt> - Set temperature (0.0-1.0, default: 0.9)
+β€’ !text --max-tokens <value> <prompt> - Set maximum tokens to generate (default: 2048)
+
+Configuration:
+β€’ Requires INFERMATIC_API environment variable set to your API key
+β€’ Requires INFERMATIC_MODEL environment variable for default model (default: Sao10K-L3.1-70B-Hanami-x1)
+
+Model Management:
+β€’ Use !text --list-models to see all available models
+β€’ Models support different capabilities and context lengths
+β€’ Costs and token limits vary by model
+
+Examples:
+β€’ !text write a python function to calculate fibonacci
+β€’ !text --list-models
+β€’ !text --use-model llama-v3-8b-instruct explain quantum computing
+β€’ !text --temperature 0.7 --max-tokens 500 write a haiku about AI
+
 πŸ“° !xkcd
diff --git a/plugins/infermatic-text.py b/plugins/infermatic-text.py
new file mode 100644
index 0000000..2dc2218
--- /dev/null
+++ b/plugins/infermatic-text.py
@@ -0,0 +1,233 @@
+"""
+Plugin for generating text using Infermatic AI API and sending it to a Matrix chat room.
+"""
+
+import os
+import requests
+import argparse
+import json
+import simplematrixbotlib as botlib
+from asyncio import Queue
+from dotenv import load_dotenv
+
+# Load environment variables from .env file in the parent directory
+plugin_dir = os.path.dirname(os.path.abspath(__file__))
+parent_dir = os.path.dirname(plugin_dir)
+dotenv_path = os.path.join(parent_dir, '.env')
+load_dotenv(dotenv_path)
+
+# Infermatic AI API configuration
+INFERMATIC_API_KEY = os.getenv("INFERMATIC_API", "")
+DEFAULT_MODEL = os.getenv("INFERMATIC_MODEL", "Sao10K-L3.1-70B-Hanami-x1")
+INFERMATIC_API_BASE = "https://api.totalgpt.ai/v1"
+
+# Queue to store pending commands
+command_queue = Queue()
+
+async def process_command(room, message, bot, prefix, config):
+    """Queue and process !text commands sequentially."""
+    match = botlib.MessageMatch(room, message, bot, prefix)
+    if match.prefix() and match.command("text"):
+        if command_queue.empty():
+            await handle_command(room, message, bot, prefix, config)
+        else:
+            await command_queue.put((room, message, bot, prefix, config))
+            await bot.api.send_text_message(room.room_id, "Command queued. Please wait for the current request to finish.")
+
+async def handle_command(room, message, bot, prefix, config):
+    """Handle !text command: generate text using Infermatic AI API."""
+    match = botlib.MessageMatch(room, message, bot, prefix)
+
+    if not (match.prefix() and match.command("text")):
+        return
+
+    # Check if API key is configured
+    if not INFERMATIC_API_KEY:
+        await bot.api.send_text_message(
+            room.room_id,
+            "Infermatic API key not configured. Please set INFERMATIC_API environment variable."
+        )
+        return
+
+    # Parse command arguments
+    args = match.args()
+
+    if len(args) < 1:
+        await show_usage(room, bot)
+        return
+
+    # Check if it's a --list-models command
+    if args[0] == "--list-models":
+        await list_models(room, bot)
+        return
+
+    # Parse other arguments
+    try:
+        # Extract options manually since argparse doesn't handle mixed positional/optional well
+        temperature = 0.9
+        max_tokens = 2048
+        custom_model = None
+        prompt_parts = []
+
+        i = 0
+        while i < len(args):
+            if args[i] == "--temperature" and i + 1 < len(args):
+                temperature = float(args[i + 1])
+                i += 2
+            elif args[i] == "--max-tokens" and i + 1 < len(args):
+                max_tokens = int(args[i + 1])
+                i += 2
+            elif args[i] == "--use-model" and i + 1 < len(args):
+                custom_model = args[i + 1]
+                i += 2
+            else:
+                prompt_parts.append(args[i])
+                i += 1
+
+        prompt = ' '.join(prompt_parts).strip()
+
+        if not prompt:
+            await show_usage(room, bot)
+            return
+
+        model = custom_model or DEFAULT_MODEL
+
+        await generate_text(room, bot, prompt, model, temperature, max_tokens)
+
+    except ValueError as e:
+        await bot.api.send_text_message(room.room_id, f"Invalid parameter value: {e}")
+    except Exception as e:
+        await bot.api.send_text_message(room.room_id, f"Error processing command: {str(e)}")
+
+async def show_usage(room, bot):
+    """Display command usage information."""
+    usage = """
+πŸ“„ Infermatic Text Generation Usage:
+
+Basic:
+β€’ !text <prompt> - Generate text using default model
+
+Commands:
+β€’ !text --list-models - List all available models
+β€’ !text --use-model <model> <prompt> - Use specific model
+
+Parameters:
+β€’ --temperature <0.0-1.0> - Set temperature (default: 0.9)
+β€’ --max-tokens <number> - Set max tokens (default: 2048)
+
+Examples:
+β€’ !text write a python function to calculate fibonacci
+β€’ !text --list-models
+β€’ !text --use-model llama-v3-8b-instruct explain quantum computing
+β€’ !text --temperature 0.7 write a haiku about AI
+"""
+    await bot.api.send_markdown_message(room.room_id, usage)
+
+async def list_models(room, bot):
+    """List all available models from Infermatic AI."""
+    try:
+        await bot.api.send_text_message(room.room_id, "πŸ” Fetching available models...")
+
+        url = f"{INFERMATIC_API_BASE}/models"
+        headers = {
+            "Authorization": f"Bearer {INFERMATIC_API_KEY}",
+            "Content-Type": "application/json"
+        }
+
+        response = requests.get(url, headers=headers, timeout=30)
+        response.raise_for_status()
+
+        data = response.json()
+        models = data.get('data', [])
+
+        if not models:
+            await bot.api.send_text_message(room.room_id, "No models found or error in response.")
+            return
+
+        # Format the model list
+        output = "πŸ”§ Available Models:<br><br>"
+
+        for model in models:
+            model_id = model.get('id', 'Unknown')
+            model_name = model.get('name', model_id)
+            context_length = model.get('context_length', 'Unknown')
+            pricing = model.get('pricing', {})
+
+            output += f"β€’ {model_name}<br>"
+            output += f"  └─ ID: {model_id}<br>"
+            output += f"  └─ Context: {context_length}<br>"
+
+            if pricing:
+                prompt_price = pricing.get('prompt', '0')
+                completion_price = pricing.get('completion', '0')
+                output += f"  └─ Price: ${prompt_price}/${completion_price} per 1M tokens<br>"
+
+            output += f"  └─ Usage: !text --use-model {model_id} <prompt><br><br>"
+
+        # Wrap in collapsible details since list can be long
+        output = f"<details><summary>πŸ”§ Available Models (Click to expand)</summary>{output}</details>"
+
+        await bot.api.send_markdown_message(room.room_id, output)
+
+    except requests.exceptions.RequestException as e:
+        await bot.api.send_text_message(room.room_id, f"❌ Error fetching models: {str(e)}")
+    except Exception as e:
+        await bot.api.send_text_message(room.room_id, f"❌ Unexpected error: {str(e)}")
+
+async def generate_text(room, bot, prompt, model, temperature, max_tokens):
+    """Generate text using the Infermatic AI API."""
+    try:
+        # Send initial processing message
+        await bot.api.send_text_message(room.room_id, "πŸ“ Generating text...")
+
+        url = f"{INFERMATIC_API_BASE}/chat/completions"
+        headers = {
+            "Authorization": f"Bearer {INFERMATIC_API_KEY}",
+            "Content-Type": "application/json"
+        }
+
+        payload = {
+            "model": model,
+            "messages": [
+                {"role": "user", "content": prompt}
+            ],
+            "temperature": temperature,
+            "max_tokens": max_tokens
+        }
+
+        response = requests.post(url, headers=headers, json=payload, timeout=120)
+        response.raise_for_status()
+
+        data = response.json()
+        generated_text = data.get('choices', [{}])[0].get('message', {}).get('content', '').strip()
+
+        if not generated_text:
+            await bot.api.send_text_message(room.room_id, "No response generated.")
+            return
+
+        # Format the output with collapsible sections
+        output = f"<details><summary>πŸ“ Generated Text (Click to expand)</summary>"
+        output += f"Model: {model}<br><br>"
+        output += f"Prompt: {prompt}<br><br>"
+        output += f"Response:<br><br>"
+        output += f"{generated_text}"
+        output += f"</details>"
+
+        await bot.api.send_markdown_message(room.room_id, output)
+
+    except requests.exceptions.Timeout:
+        await bot.api.send_text_message(room.room_id, "❌ Request timed out. The model is taking too long to respond.")
+    except requests.exceptions.HTTPError as e:
+        if e.response.status_code == 401:
+            await bot.api.send_text_message(room.room_id, "❌ Authentication failed. Please check your INFERMATIC_API key.")
+        elif e.response.status_code == 429:
+            await bot.api.send_text_message(room.room_id, "❌ Rate limit exceeded. Please try again later.")
+        else:
+            await bot.api.send_text_message(room.room_id, f"❌ API error: HTTP {e.response.status_code}")
+    except Exception as e:
+        await bot.api.send_text_message(room.room_id, f"❌ Error generating text: {str(e)}")
+    finally:
+        # Process next queued command
+        if not command_queue.empty():
+            next_command = await command_queue.get()
+            await handle_command(*next_command)
diff --git a/plugins/sd_text.py b/plugins/sd_text.py
deleted file mode 100644
index 4352ebd..0000000
--- a/plugins/sd_text.py
+++ /dev/null
@@ -1,95 +0,0 @@
-"""
-Plugin for generating text using Ollama's Mistral 7B Instruct model and sending it to a Matrix chat room.
-"""
-
-import requests
-from asyncio import Queue
-import simplematrixbotlib as botlib
-import argparse
-
-# Queue to store pending commands
-command_queue = Queue()
-
-API_URL = "http://localhost:11434/api/generate"
-MODEL_NAME = "mistral:7b-instruct"
-
-async def process_command(room, message, bot, prefix, config):
-    """
-    Queue and process !text commands sequentially.
-    """
-    match = botlib.MessageMatch(room, message, bot, prefix)
-    if match.prefix() and match.command("text"):
-        if command_queue.empty():
-            await handle_command(room, message, bot, prefix, config)
-        else:
-            await command_queue.put((room, message, bot, prefix, config))
-
-async def handle_command(room, message, bot, prefix, config):
-    """
-    Send the prompt to Ollama API and return the generated text.
- """ - match = botlib.MessageMatch(room, message, bot, prefix) - if not (match.prefix() and match.command("text")): - return - - # Parse optional arguments - parser = argparse.ArgumentParser(description='Generate text using Ollama API') - parser.add_argument('--max_tokens', type=int, default=512, help='Maximum tokens to generate') - parser.add_argument('--temperature', type=float, default=0.7, help='Temperature for generation') - parser.add_argument('prompt', nargs='+', help='Prompt for the model') - - try: - args = parser.parse_args(message.body.split()[1:]) # Skip command itself - prompt = ' '.join(args.prompt).strip() - - if not prompt: - await bot.api.send_text_message(room.room_id, "Usage: !text ") - return - - payload = { - "model": MODEL_NAME, - "prompt": prompt, - "max_tokens": args.max_tokens, - "temperature": args.temperature, - "stream": False - } - - response = requests.post(API_URL, json=payload, timeout=60) - response.raise_for_status() - r = response.json() - - generated_text = r.get("response", "").strip() - if not generated_text: - generated_text = "(No response from model)" - - await bot.api.send_text_message(room.room_id, generated_text) - - except argparse.ArgumentError as e: - await bot.api.send_text_message(room.room_id, f"Argument error: {e}") - except requests.exceptions.RequestException as e: - await bot.api.send_text_message(room.room_id, f"Error connecting to Ollama API: {e}") - except Exception as e: - await bot.api.send_text_message(room.room_id, f"Unexpected error: {e}") - finally: - # Process next command from the queue, if any - if not command_queue.empty(): - next_command = await command_queue.get() - await handle_command(*next_command) - -def print_help(): - """ - Generates help text for the !text command. - """ - return """ -

-Generate text using Ollama's Mistral 7B Instruct model
-
-Usage:
-β€’ !text <prompt> - Basic prompt for the model
-β€’ Optional arguments:
-  β€’ --max_tokens MAX_TOKENS - Maximum tokens to generate (default 512)
-  β€’ --temperature TEMPERATURE - Sampling temperature (default 0.7)
-""" diff --git a/plugins/stable-diffusion.py b/plugins/stable-diffusion.py index 8a77889..0872db5 100644 --- a/plugins/stable-diffusion.py +++ b/plugins/stable-diffusion.py @@ -118,7 +118,7 @@ async def handle_command(room, message, bot, prefix, config): r = response.json() # Use secure temporary file - with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as temp_file: + with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as temp_file: filename = temp_file.name temp_file.write(base64.b64decode(r['images'][0])) diff --git a/plugins/youtube-preview.py b/plugins/youtube-preview.py deleted file mode 100644 index 4a7305e..0000000 --- a/plugins/youtube-preview.py +++ /dev/null @@ -1,187 +0,0 @@ -""" -Plugin for providing a command to fetch YouTube video information from links. -""" - -# Importing necessary libraries -import re -import logging -import asyncio -import aiohttp -import yt_dlp -import simplematrixbotlib as botlib -from youtube_title_parse import get_artist_title - -LYRICIST_API_URL = "https://lyrist.vercel.app/api/{}/{}" - - -def seconds_to_minutes_seconds(seconds): - """ - Converts seconds to a string representation of minutes and seconds. - - Args: - seconds (int): The number of seconds. - - Returns: - str: A string representation of minutes and seconds in the format MM:SS. - """ - minutes = seconds // 60 - seconds %= 60 - return f"{minutes:02d}:{seconds:02d}" - - -async def fetch_lyrics(song, artist): - """ - Asynchronously fetches lyrics for a song from the Lyricist API. - - Args: - song (str): The name of the song. - artist (str): The name of the artist. - - Returns: - str: Lyrics of the song. - None if an error occurs during fetching. - """ - try: - async with aiohttp.ClientSession() as session: - url = LYRICIST_API_URL.format(artist, song) - logging.info(f"Fetching lyrics from: {url}") - async with session.get(url, timeout=aiohttp.ClientTimeout(total=10)) as response: - if response.status == 200: - data = await response.json() - return data.get("lyrics") - else: - logging.warning(f"Lyrics API returned status {response.status}") - return None - except asyncio.TimeoutError: - logging.error("Timeout fetching lyrics") - return None - except Exception as e: - logging.error(f"Error fetching lyrics: {str(e)}") - return None - - -async def fetch_youtube_info(youtube_url): - """ - Asynchronously fetches information about a YouTube video using yt-dlp. - - Args: - youtube_url (str): The URL of the YouTube video. - - Returns: - str: A message containing information about the YouTube video. - None if an error occurs during fetching. 
- """ - try: - logging.info(f"Fetching YouTube info for: {youtube_url}") - - # Configure yt-dlp options - ydl_opts = { - 'quiet': True, - 'no_warnings': True, - 'extract_flat': False, - 'skip_download': True, - } - - # Run yt-dlp in thread pool to avoid blocking - loop = asyncio.get_event_loop() - - def extract_info(): - with yt_dlp.YoutubeDL(ydl_opts) as ydl: - return ydl.extract_info(youtube_url, download=False) - - info = await loop.run_in_executor(None, extract_info) - - if not info: - logging.error("No info returned from yt-dlp") - return None - - # Extract video information - title = info.get('title', 'Unknown Title') - description = info.get('description', 'No description available') - duration = info.get('duration', 0) - view_count = info.get('view_count', 0) - uploader = info.get('uploader', 'Unknown') - - logging.info(f"Video title: {title}") - - length = seconds_to_minutes_seconds(duration) - - # Parse artist and song from title - artist, song = get_artist_title(title) - logging.info(f"Parsed artist: {artist}, song: {song}") - - # Limit description length to avoid huge messages - if len(description) > 500: - description = description[:500] + "..." - - description_with_breaks = description.replace('\n', '
-
-        # Build basic info message
-        info_message = f"""🎬🎝 Title: {title}
-Length: {length} | Views: {view_count:,} | Uploader: {uploader}
-‡︎Description‡︎{description_with_breaks}
-"""
-
-        # Try to fetch lyrics if artist and song were parsed
-        if artist and song:
-            logging.info("Attempting to fetch lyrics...")
-            lyrics = await fetch_lyrics(song, artist)
-            if lyrics:
-                lyrics = lyrics.replace('\n', "<br>")
-                # Limit lyrics length
-                if len(lyrics) > 3000:
-                    lyrics = lyrics[:3000] + "<br>...(truncated)"
-                info_message += f"<br>🎡 Lyrics:<br>{lyrics}<br>"
-            else:
-                logging.info("No lyrics found")
-        else:
-            logging.info("Could not parse artist/song from title, skipping lyrics")
-
-        return info_message
-    except Exception as e:
-        logging.error(f"Error fetching YouTube video information: {str(e)}", exc_info=True)
-        return None
-
-
-async def handle_command(room, message, bot, prefix, config):
-    """
-    Asynchronously handles the command to fetch YouTube video information.
-
-    Args:
-        room (Room): The Matrix room where the command was invoked.
-        message (RoomMessage): The message object containing the command.
-        bot (MatrixBot): The Matrix bot instance.
-        prefix (str): The command prefix.
-        config (dict): The bot's configuration.
-
-    Returns:
-        None
-    """
-    match = botlib.MessageMatch(room, message, bot, prefix)
-
-    # Check if message contains a YouTube link
-    if match.is_not_from_this_bot() and re.search(r'(youtube\.com/watch\?v=|youtu\.be/)', message.body):
-        logging.info(f"YouTube link detected in message: {message.body}")
-
-        # Match both youtube.com and youtu.be formats
-        video_id_match = re.search(r'(?:youtube\.com/watch\?v=|youtu\.be/)([a-zA-Z0-9_-]{11})', message.body)
-
-        if video_id_match:
-            video_id = video_id_match.group(1)
-            youtube_url = f"https://www.youtube.com/watch?v={video_id}"
-            logging.info(f"Fetching information for YouTube video ID: {video_id}")
-
-            retry_count = 2  # Reduced retries since yt-dlp is more reliable
-            while retry_count > 0:
-                info_message = await fetch_youtube_info(youtube_url)
-                if info_message:
-                    await bot.api.send_markdown_message(room.room_id, info_message)
-                    logging.info("Sent YouTube video information to the room")
-                    break
-                else:
-                    logging.warning(f"Failed to fetch info, retrying... ({retry_count-1} attempts left)")
-                    retry_count -= 1
-                    if retry_count > 0:
-                        await asyncio.sleep(2)  # wait for 2 seconds before retrying
-                    else:
-                        logging.error("Failed to fetch YouTube video information after all retries")
-                        await bot.api.send_text_message(room.room_id, "Failed to fetch YouTube video information. The video may be unavailable or age-restricted.")
-        else:
-            logging.warning("Could not extract video ID from YouTube URL")
diff --git a/plugins/youtube-search.py b/plugins/youtube-search.py
index 28d52af..21b3624 100644
--- a/plugins/youtube-search.py
+++ b/plugins/youtube-search.py
@@ -28,7 +28,7 @@ async def handle_command(room, message, bot, PREFIX, config):
     else:
         search_terms = " ".join(args)
         logging.info(f"Performing YouTube search for: {search_terms}")
-        results = YoutubeSearch(search_terms, max_results=1).to_dict()
+        results = YoutubeSearch(search_terms, max_results=3).to_dict()
         if results:
            output = generate_output(results)
            await send_collapsible_message(room, bot, output)
diff --git a/requirements.txt b/requirements.txt
index e62bf08..b9d462f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -13,3 +13,5 @@ schedule
 yt-dlp
 pyopenssl
 psutil
+toml
+python-whois
\ No newline at end of file