26 Commits
v3.0 ... v4.0.1

Author SHA1 Message Date
75fb627361 Clean up type hints everywhere, overhaul bot configuration
All checks were successful
Build Heidi Docker image / build-docker (push) Successful in 2m12s
2025-09-23 20:43:42 +02:00
591f4ea191 Add line-length configuration 2025-09-23 20:37:38 +02:00
f167b23dcc Update development dependencies 2025-09-23 20:37:27 +02:00
d319fa21f5 Update python interpreter version in dockerfile 2025-09-23 20:37:17 +02:00
44a1ea2c83 Add sounds 2025-09-23 20:34:28 +02:00
d04244221b Replace ENTRANCE.SOUND menu with dropdowns
All checks were successful
Build Heidi Docker image / build-docker (push) Successful in 14s
2023-12-09 23:01:24 +01:00
c2847de7dd Add instantbuttons command + make responses ephemeral
All checks were successful
Build Heidi Docker image / build-docker (push) Successful in 14s
/instantbuttons displays a soundboard via a button ui
2023-12-09 19:51:28 +01:00
08230eb3de Enforce heidi_spam channel for commands
All checks were successful
Build Heidi Docker image / build-docker (push) Successful in 14s
2023-12-09 18:44:16 +01:00
f2ddb4ab66 Only play entrance sound when other is present + reformat
All checks were successful
Build Heidi Docker image / build-docker (push) Successful in 14s
2023-12-09 18:04:24 +01:00
876232f674 Ignore user config file 2023-12-09 18:03:42 +01:00
d7c3a7c740 Allow sounds with different file extensions
All checks were successful
Build Heidi Docker image / build-docker (push) Successful in 14s
Before only .mkv files could be played, as the extension was hardcoded
2023-12-09 17:55:21 +01:00
bdcd5208a7 Untrack Heidi_User.conf 2023-12-09 17:54:56 +01:00
79fcf0142a Some more options for randomly selected answers 2023-12-09 17:48:27 +01:00
0f6cc12182 Delete orphaned code
All checks were successful
Build Heidi Docker image / build-docker (push) Successful in 27s
2023-12-09 17:36:53 +01:00
9b66061ee7 Reformat TODO comments
All checks were successful
Build Heidi Docker image / build-docker (push) Successful in 15s
2023-12-09 17:35:04 +01:00
c6608e4695 Remove rocm stuff from flake 2023-12-09 17:34:54 +01:00
3335009692 Fix SOUNDDIR being in the wrong file
Some checks failed
Build Heidi Docker image / build-docker (push) Failing after 17s
2023-12-09 17:28:09 +01:00
d7604b6604 Update flake.lock 2023-12-09 17:20:46 +01:00
2e493e404b Split Heidi into multiple parts
All checks were successful
Build Heidi Docker image / build-docker (push) Successful in 28s
2023-12-09 17:14:05 +01:00
16822e0212 Remove commented out code + add docstrings 2023-12-09 16:31:00 +01:00
9d78352ea5 Update handling of "None" 2023-12-09 15:48:19 +01:00
13b3e9910a Add sounds 2023-12-09 15:28:26 +01:00
1b89d2ef3b Update flake.lock 2023-12-09 15:28:20 +01:00
6debffbd77 Add Suiii sound 2023-11-27 20:50:58 +01:00
e08c1c0204 Add joko sounds 2023-11-26 12:18:13 +01:00
82f0387675 Add yakari sound 2023-11-26 00:59:04 +01:00
76 changed files with 905 additions and 1043 deletions

1
.gitignore vendored
View File

@ -12,3 +12,4 @@ Pipfile.lock
/disabled_voicelines/
*.svg
.vscode
Heidi_User.conf

.gitlab-ci.yml
View File

@ -1,38 +0,0 @@
workflow: # for entire pipeline
rules:
- if: '$CI_COMMIT_REF_NAME == "master"' # only run on master...
changes: # ...and when these files have changed
- "*.py"
- "Dockerfile"
docker-build:
stage: build
image: docker:20 # provides the docker toolset (but without an active daemon)
services: # configure images that run during jobs linked to the image (above)
- docker:dind # dind build on docker and starts up the dockerdaemon (docker itself doesn't do that), which is needed to call docker build etc.
before_script:
- docker login -u $CI_REGISTRY_USER -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker pull $CI_REGISTRY_IMAGE:latest || true # latest image for cache (not failing if image is not found)
- >
docker build
--pull
--cache-from $CI_REGISTRY_IMAGE:latest
--label "org.opencontainers.image.title=$CI_PROJECT_TITLE"
--label "org.opencontainers.image.url=$CI_PROJECT_URL"
--label "org.opencontainers.image.created=$CI_JOB_STARTED_AT"
--label "org.opencontainers.image.revision=$CI_COMMIT_SHA"
--label "org.opencontainers.image.version=$CI_COMMIT_REF_NAME"
--tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
.
- docker tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA $CI_REGISTRY_IMAGE:latest
- docker push $CI_REGISTRY_IMAGE:latest
docker-deploy:
stage: deploy
image: alpine:3.15
needs: ["docker-build"]
script:
- chmod og= $ID_RSA
- apk update && apk add openssh-client
- ssh -i $ID_RSA -o StrictHostKeyChecking=no $SERVER_USER@$SERVER_IP "/home/christoph/$CI_PROJECT_TITLE/launch.sh"

Dockerfile
View File

@ -1,10 +1,10 @@
# syntax=docker/dockerfile:1
FROM python:3.13.7-slim-trixie
RUN apt-get update -y && apt-get install -y ffmpeg libopus0
FROM python:3.10.1-slim-buster
RUN apt-get update -y
RUN apt-get install -y ffmpeg libopus0
WORKDIR /app
COPY requirements.txt requirements.txt
RUN pip3 install -r requirements.txt
COPY . .
RUN pip3 install -r requirements.txt
CMD ["python3", "-u", "bot.py"]

738
bot.py
View File

@ -1,247 +1,63 @@
# Example: https://github.com/Rapptz/discord.py/blob/master/examples/app_commands/basic.py
import os, re, random, logging, asyncio, discord, configparser
from discord import app_commands
import os
import random
import logging
from types import CoroutineType
from typing import Any, Callable, override
from discord import app_commands, ui
from discord.app_commands import Choice
from functools import reduce
from dotenv import load_dotenv
from typing import Dict, List, Optional, Union
# TODO: Reenable + extend textgen
# from textgen import textgen
# from textgen_markov import MarkovTextGenerator
# from textgen_lstm import LSTMTextGenerator
from discord.client import Client
from discord.components import SelectOption
from discord.enums import ButtonStyle
from discord.flags import Intents
from discord.interactions import Interaction
from discord.member import Member, VoiceState
from discord.message import Message
# TODO: Reenable + extend scraper
# from models import Models
# We're fancy today
from rich.traceback import install
install(show_locals=True)
load_dotenv()
from heidi_client import HeidiClient
from heidi_config import ConfigSection, FlagsConfigKey
from heidi_constants import DOCKER, HEIDI_SPAM_ID, SOUNDDIR
from heidi_helpers import enforce_channel, play_voice_line_for_member
# ================================================================================================ #
# ================================================================================================ #
# NOTE: Always set this correctly:
DOCKER = os.getenv("DOCKER") == "True"
# ================================================================================================ #
# ================================================================================================ #
# TODO: Only post in heidi-spam channel
# TODO: yt-dlp music support
# TODO: Somehow upload voicelines more easily (from discord voice message?)
# IDs of the servers Heidi is used on
LINUS_GUILD = discord.Object(id=431154792308408340)
TEST_GUILD = discord.Object(id=821511861178204161)
CONFIGPATH = "/config" if DOCKER else "."
USERCONFIGNAME = "Heidi_User.conf"
class HeidiClient(discord.Client):
def __init__(self, *, intents: discord.Intents):
super().__init__(status="Nur eine kann GNTM werden!", intents=intents)
# Separate object that keeps all application command state
self.tree = app_commands.CommandTree(self)
# Handle persistent user configuration
self.user_config = configparser.ConfigParser()
if not os.path.exists(f"{CONFIGPATH}/{USERCONFIGNAME}"):
os.mknod(f"{CONFIGPATH}/{USERCONFIGNAME}")
self.user_config.read(f"{CONFIGPATH}/{USERCONFIGNAME}")
self.update_to_default_user_config()
self.print_user_config()
# self.models = Models() # scraped model list
# automatic actions on all messages
# on_message_triggers is a map with tuples of two functions: (predicate, action)
# the predicate receives the message as argument
# if the predicate is true the action is performed
self.on_message_triggers = {
# lambda m: m.author.nick.lower() in self.models.get_in_names(): self.autoreact_to_girls,
lambda m: "jeremy" in m.author.nick.lower(): self._autoreact_to_jeremy,
lambda m: "kardashian" in m.author.nick.lower()
or "jenner" in m.author.nick.lower(): self._autoreact_to_kardashian,
}
# automatic actions on voice state changes
# on_voice_state_triggers is a map with tuples of two functions: (predicate, action)
# the predicate receives the member, before- and after-state as arguments
# if the predicate is true, the action is performed
self.on_voice_state_triggers = {
lambda m, b, a: b.channel != a.channel
and a.channel != None
and isinstance(a.channel, discord.VoiceChannel): self._play_entrance_sound,
}
# Textgen
# self.textgen_models: dict[str, textgen] = {
# # The name must correspond to the name of the training text file
# "kommunistisches_manifest": LSTMTextGenerator(10),
# "musk": LSTMTextGenerator(10),
# "bibel": LSTMTextGenerator(10)
# "bibel": MarkovTextGenerator(3), # Prefix length of 3
# "kommunistisches_manifest": MarkovTextGenerator(3),
# "musk": MarkovTextGenerator(3)
# }
# for name, model in self.textgen_models.items():
# model.init(name) # Loads the textfile
# if os.path.exists(f"weights/{name}_lstm_model.pt"):
# model.load()
# elif not DOCKER:
# model.train()
# else:
# print("Error: Can't load model", name)
# print("Generating test sentence for", name)
# self.textgen_models[name].generate_sentence()
# Synchronize commands to guilds
async def setup_hook(self):
self.tree.copy_global_to(guild=LINUS_GUILD)
await self.tree.sync(guild=LINUS_GUILD)
self.tree.copy_global_to(guild=TEST_GUILD)
await self.tree.sync(guild=TEST_GUILD)
def update_to_default_user_config(self):
"""
Adds config keys to the config, if they don't exist yet.
"""
user_config_sections = ["ENTRANCE.SOUND"]
for section in user_config_sections:
if section not in self.user_config:
print(f"Adding section {section} to {CONFIGPATH}/{USERCONFIGNAME}")
self.user_config[section] = dict()
self.write_user_config()
def print_user_config(self):
print("Read persistent configuration:\n")
for section in self.user_config.sections():
print(f"[{section}]")
for key in self.user_config[section]:
print(f"{key}={self.user_config[section][key]}")
print("")
def write_user_config(self):
if not os.path.exists(f"{CONFIGPATH}/{USERCONFIGNAME}"):
print(f"Error: {CONFIGPATH}/{USERCONFIGNAME} doesn't exist!")
return
print(f"Writing {CONFIGPATH}/{USERCONFIGNAME}")
with open(f"{CONFIGPATH}/{USERCONFIGNAME}", "w") as file:
self.user_config.write(file)
# Commands -----------------------------------------------------------------------------------
# async def list_models_in(self, message):
# """
# wer ist dabei?
# """
# await message.channel.send("\n".join(self.models.get_in_names()))
# async def list_models_out(self, message):
# """
# wer ist raus? (Liste der Keks welche ge*ickt wurden)
# """
# await message.channel.send("\n".join(self.models.get_out_names()))
# async def show_model_picture(self, message):
# """
# gib Bild von <Name>
# """
# name = message.content.split()[-1]
# picture = discord.Embed()
# picture.set_image(url=self.models.get_image(name))
# picture.set_footer(text=name)
# await message.channel.send(embed=picture)
# Automatic Actions --------------------------------------------------------------------------
# @staticmethod
# async def autoreact_to_girls(message):
# """
# ❤ aktives Model
# """
# await message.add_reaction("❤")
@staticmethod
async def _autoreact_to_jeremy(message: discord.Message):
"""
🧀 Jeremy
"""
await message.add_reaction("🧀")
@staticmethod
async def _autoreact_to_kardashian(message: discord.Message):
"""
💄 Kardashian
"""
await message.add_reaction("💄")
async def _play_entrance_sound(
self,
member: discord.Member,
before: discord.VoiceState,
after: discord.VoiceState,
):
soundpath: Union[str, None] = self.user_config["ENTRANCE.SOUND"].get(
member.name, None
)
if soundpath == None:
print(f"User {member.name} has not set an entrance sound")
return
board, sound = soundpath.split("/")
# Wait a bit to not have simultaneous joins
await asyncio.sleep(1)
await play_voice_line_for_member(None, member, board, sound)
# ------------------------------------------------------------------------------------------------
# Log to file
handler = logging.FileHandler(filename="discord.log", encoding="utf-8", mode="w")
# Intents specification is no longer optional
intents = discord.Intents.default()
intents: Intents = Intents.default()
intents.members = True # Allow to react to member join/leave etc
intents.message_content = True # Allow to read message content from arbitrary messages
intents.voice_states = True # Allow to process on_voice_state_update
# Setup our client
# Set up our client
client = HeidiClient(intents=intents)
# Events -----------------------------------------------------------------------------------------
# NOTE: I defined the events outside of the Client class, don't know if I like it or not...
# NOTE: I defined the events outside the Client class, don't know if I like it or not...
@client.event
async def on_ready():
if client.user != None:
async def on_ready() -> None:
"""
This event triggers when the Heidi client has finished connecting.
"""
if client.user is not None:
print(f"{client.user} (id: {client.user.id}) has connected to Discord!")
else:
print("client.user is None!")
@client.event
async def on_message(message: discord.Message):
async def on_message(message: Message) -> None:
"""
This event triggers when a message is sent in any text channel.
"""
# Skip Heidis own messages
if message.author == client.user:
return
@ -256,9 +72,10 @@ async def on_message(message: discord.Message):
@client.event
async def on_voice_state_update(
member: discord.Member, before: discord.VoiceState, after: discord.VoiceState
):
async def on_voice_state_update(member: Member, before: VoiceState, after: VoiceState) -> None:
"""
This event triggers when a member joins/changes/leaves a voice channel or mutes/unmutes.
"""
# Skip Heidis own voice state updates (e.g. on /say)
if member._user == client.user:
return
@ -267,7 +84,9 @@ async def on_voice_state_update(
# python iterates over the keys of a map
for predicate in client.on_voice_state_triggers:
if predicate(member, before, after):
action = client.on_voice_state_triggers[predicate]
action: Callable[[Member, VoiceState, VoiceState], CoroutineType[Any, Any, None]] = (
client.on_voice_state_triggers[predicate]
)
print(f"on_voice_state_update: calling {action.__name__}")
await action(member, before, after)
@ -275,104 +94,207 @@ async def on_voice_state_update(
# Config Commands --------------------------------------------------------------------------------
async def user_config_key_autocomplete(
interaction: discord.Interaction, current: str
) -> List[Choice[str]]:
class FlagValueSelect(ui.Select[ui.View]):
def __init__(
self, flag: str, on_flag_select_callback: Callable[[Interaction, str, str], CoroutineType[Any, Any, None]]
) -> None:
self.flag: str = flag
self.on_flag_select_callback: Callable[[Interaction, str, str], CoroutineType[Any, Any, None]] = (
on_flag_select_callback
)
options: list[SelectOption] = [SelectOption(label=val, value=val) for val in ["True", "False"]]
super().__init__(placeholder="Select Value", min_values=1, max_values=1, options=options)
@override
async def callback(self, interaction: Interaction) -> None:
await self.on_flag_select_callback(interaction, self.flag, self.values[0])
class FlagValueView(ui.View):
def __init__(
self, flag: str, on_flag_select_callback: Callable[[Interaction, str, str], CoroutineType[Any, Any, None]]
) -> None:
super().__init__(timeout=600)
_ = self.add_item(FlagValueSelect(flag, on_flag_select_callback))
class FlagsSelect(ui.Select[ui.View]):
def __init__(
self, on_flag_select_callback: Callable[[Interaction, str, str], CoroutineType[Any, Any, None]]
) -> None:
self.on_flag_select_callback: Callable[[Interaction, str, str], CoroutineType[Any, Any, None]] = (
on_flag_select_callback
)
options: list[SelectOption] = [SelectOption(label=flag.value, value=flag.value) for flag in FlagsConfigKey]
super().__init__(placeholder="Select Flag", min_values=1, max_values=1, options=options)
@override
async def callback(self, interaction: Interaction) -> None:
_ = await interaction.response.send_message(
"Welchen Wert willst du setzen?",
view=FlagValueView(self.values[0], self.on_flag_select_callback),
ephemeral=True,
)
class FlagsView(ui.View):
def __init__(
self, on_flag_select_callback: Callable[[Interaction, str, str], CoroutineType[Any, Any, None]]
) -> None:
super().__init__(timeout=600)
_ = self.add_item(FlagsSelect(on_flag_select_callback))
class EntranceSoundBoardSelect(ui.Select[ui.View]):
def __init__(
self, on_sound_select_callback: Callable[[Interaction, str, str], CoroutineType[Any, Any, None]]
) -> None:
self.on_sound_select_callback: Callable[[Interaction, str, str], CoroutineType[Any, Any, None]] = (
on_sound_select_callback
)
options: list[SelectOption] = [SelectOption(label=board, value=board) for board in os.listdir(f"{SOUNDDIR}")]
super().__init__(placeholder="Select Board", min_values=1, max_values=1, options=options)
@override
async def callback(self, interaction: Interaction) -> None:
_ = await interaction.response.send_message(
"Welchen sound willst du?",
view=EntranceSoundSoundView(self.values[0], self.on_sound_select_callback),
ephemeral=True,
)
class EntranceSoundBoardView(ui.View):
def __init__(
self, on_sound_select_callback: Callable[[Interaction, str, str], CoroutineType[Any, Any, None]]
) -> None:
super().__init__(timeout=600)
_ = self.add_item(EntranceSoundBoardSelect(on_sound_select_callback))
class EntranceSoundSoundSelect(ui.Select[ui.View]):
def __init__(
self, board: str, on_sound_select_callback: Callable[[Interaction, str, str], CoroutineType[Any, Any, None]]
) -> None:
self.board: str = board
self.on_sound_select_callback: Callable[[Interaction, str, str], CoroutineType[Any, Any, None]] = (
on_sound_select_callback
)
options: list[SelectOption] = [
SelectOption(label=sound.split(".")[0], value=sound) for sound in os.listdir(f"{SOUNDDIR}/{board}")
]
super().__init__(placeholder="Select Sound", min_values=1, max_values=1, options=options)
@override
async def callback(self, interaction: Interaction) -> None:
await self.on_sound_select_callback(interaction, self.board, self.values[0])
class EntranceSoundSoundView(ui.View):
def __init__(
self, board: str, on_sound_select_callback: Callable[[Interaction, str, str], CoroutineType[Any, Any, None]]
) -> None:
super().__init__(timeout=600)
_ = self.add_item(EntranceSoundSoundSelect(board, on_sound_select_callback))
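# Overview of the dropdown chains used by /heidiconfig: FlagsView -> FlagsSelect ->
# FlagValueView -> FlagValueSelect ends by awaiting on_flag_select_callback(interaction, flag, value),
# while EntranceSoundBoardView -> EntranceSoundBoardSelect -> EntranceSoundSoundView ->
# EntranceSoundSoundSelect ends by awaiting on_sound_select_callback(interaction, board, sound).
# Both callbacks are defined inside user_config below and persist the selection via
# client.write_user_config().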
async def user_config_key_autocomplete(interaction: Interaction, current: str) -> list[Choice[str]]:
"""
Suggest a value from the user config keys (each .conf section is a key).
"""
return [
Choice(name=key, value=key)
for key in client.user_config.sections()
if key.lower().startswith(current.lower())
Choice(name=key, value=key) for key in client.user_config.sections() if key.lower().startswith(current.lower())
]
async def user_config_value_autocomplete(
interaction: discord.Interaction, current: str
) -> List[Choice[str]]:
"""
Calls an autocomplete function depending on the entered config_key.
"""
autocompleters = {"ENTRANCE.SOUND": user_entrance_sound_autocomplete}
autocompleter = autocompleters[interaction.namespace.option]
print(f"config_value_autocomplete: calling {autocompleter.__name__}")
return autocompleter(interaction, current)
def user_entrance_sound_autocomplete(
interaction: discord.Interaction, current: str
) -> List[Choice[str]]:
"""
Generates autocomplete options for the ENTRANCE.SOUND config key.
"""
boards: List[str] = os.listdir(SOUNDDIR)
all_sounds: Dict[str, List[str]] = {
board: list(map(lambda x: x.split(".")[0], os.listdir(f"{SOUNDDIR}/{board}/")))
for board in boards
} # These are all sounds, organized per board
completions: List[Choice[str]] = []
for (
board,
board_sounds,
) in all_sounds.items(): # Iterate over all sounds, organized per board
for sound in board_sounds: # Iterate over board specific sounds
soundpath = f"{board}/{sound}"
if soundpath.lower().startswith(current.lower()):
completions += [Choice(name=soundpath, value=soundpath)]
return completions
@client.tree.command(
name="userconfig",
name="heidiconfig",
description="User-spezifische Heidi-Einstellungen (Heidi merkt sie sich in ihrem riesigen Gehirn).",
)
@app_commands.rename(config_key="option")
@app_commands.describe(config_key="Die Option, welche du ändern willst.")
@app_commands.autocomplete(config_key=user_config_key_autocomplete)
@app_commands.rename(config_value="wert")
@app_commands.describe(
config_value="Der Wert, auf welche die Option gesetzt werden soll."
)
@app_commands.autocomplete(config_value=user_config_value_autocomplete)
async def user_config(
interaction: discord.Interaction, config_key: str, config_value: str
):
@enforce_channel(HEIDI_SPAM_ID)
async def user_config(interaction: Interaction[Client], config_key: str) -> None:
"""
Set a user config value for the calling user.
"""
# Only Members can set settings
if not isinstance(interaction.user, discord.Member):
if not isinstance(interaction.user, Member):
print("User not a member")
await interaction.response.send_message("Heidi sagt: Komm in die Gruppe!")
_ = await interaction.response.send_message("Heidi sagt: Komm in die Gruppe!", ephemeral=True)
return
member: discord.Member = interaction.user
member: Member = interaction.user
client.user_config[config_key][member.name] = config_value
async def on_flag_select_callback(interaction: Interaction, flag: str, value: str) -> None:
"""
This function is called when a FlagValueSelect option is selected.
"""
client.user_config[ConfigSection.FLAGS.value][flag] = value
client.write_user_config()
await interaction.response.send_message(
f"Ok, ich schreibe {member.name}={config_value} in mein fettes Gehirn!"
_ = await interaction.response.send_message(
f"Ok, ich schreibe {flag}={value} in mein fettes Gehirn!",
ephemeral=True,
)
async def on_sound_select_callback(interaction: Interaction, board: str, sound: str) -> None:
"""
This function is called when an EntranceSoundSoundSelect option is selected.
"""
client.user_config[config_key][member.name] = f"{board}/{sound}"
client.write_user_config()
_ = await interaction.response.send_message(
f"Ok, ich schreibe {member.name}={board}/{sound} in mein fettes Gehirn!",
ephemeral=True,
)
# Views for different user config options are defined here
views: dict[str, tuple[type[ui.View], Callable[..., CoroutineType[Any, Any, None]], str]] = {
ConfigSection.FLAGS.value: (
FlagsView,
on_flag_select_callback,
"Welches Setting möchtest du ändern?",
),
ConfigSection.ENTRANCE_SOUND.value: (
EntranceSoundBoardView,
on_sound_select_callback,
"Aus welchem Soundboard soll dein Sound sein?",
),
}
view, select_callback, description = views[config_key]
_ = await interaction.response.send_message(
description,
view=view(select_callback), # pyright: ignore[reportCallIssue]
ephemeral=True,
)
# Commands ---------------------------------------------------------------------------------------
@client.tree.command(
name="giblinkbruder",
description="Heidi hilft mit dem Link zu deiner Lieblingsshow im Qualitätsfernsehen.",
)
async def show_link(interaction: discord.Interaction):
link_pro7 = "https://www.prosieben.de/tv/germanys-next-topmodel/livestream"
link_joyn = "https://www.joyn.de/serien/germanys-next-topmodel"
await interaction.response.send_message(
f"ProSieben: {link_pro7}\nJoyn: {link_joyn}"
)
@client.tree.command(name="heidi", description="Heidi!")
async def heidi_exclaim(interaction: discord.Interaction):
@enforce_channel(HEIDI_SPAM_ID)
async def heidi_exclaim(interaction: Interaction) -> None:
"""
Print a random Heidi quote.
"""
messages = [
"Die sind doch fast 18!",
"Heidi!",
@ -381,139 +303,162 @@ async def heidi_exclaim(interaction: discord.Interaction):
"Warum denn so schüchtern?",
"Im TV ist das legal!",
"Das Stroh ist nur fürs Shooting!",
"Jetzt sei doch mal sexy!",
"Stell dich nicht so an!",
"Models müssen da halt durch!",
"Heul doch nicht!",
]
await interaction.response.send_message(random.choice(messages))
_ = await interaction.response.send_message(random.choice(messages))
@client.tree.command(name="miesmuschel", description="Was denkt Heidi?")
@app_commands.rename(question="frage")
@app_commands.describe(question="Heidi wird es beantworten!")
async def magic_shell(interaction: discord.Interaction, question: str):
@enforce_channel(HEIDI_SPAM_ID)
async def magic_shell(interaction: Interaction, question: str) -> None:
"""
Answer a yes/no question.
"""
# Should be equal amounts of yes/no answers, to have a 50/50 chance.
choices = [
"Ja!",
"Jo.",
"Jo",
"Total!",
"Natürlich.",
"Natürlich",
"Klaro Karo",
"Offensichtlich Sherlock",
"Tom sagt Ja",
"Nein!",
"Nö.",
"Nä.",
"Niemals!",
"Nur über meine Leiche du Hurensohn!",
"In deinen Träumen.",
"Tom sagt Nein",
]
question = question.strip()
question_mark = "" if question[-1] == "?" else "?"
await interaction.response.send_message(
f"{question}{question_mark}\nHeidi sagt: {random.choice(choices)}"
)
_ = await interaction.response.send_message(f"{question}{question_mark}\nHeidi sagt: {random.choice(choices)}")
# TODO: Allow , separated varargs, need to parse manually as slash commands don't support varargs
# TODO: Allow separated varargs, need to parse manually as slash commands don't support varargs
@client.tree.command(name="wähle", description="Heidi trifft die Wahl!")
@app_commands.rename(option_a="entweder")
@app_commands.describe(option_a="Ist es vielleicht dies?")
@app_commands.rename(option_b="oder")
@app_commands.describe(option_b="Oder doch eher das?")
async def choose(interaction: discord.Interaction, option_a: str, option_b: str):
@enforce_channel(HEIDI_SPAM_ID)
async def choose(interaction: Interaction, option_a: str, option_b: str) -> None:
"""
Select an answer from two options.
"""
options = [option_a.strip(), option_b.strip()]
await interaction.response.send_message(
_ = await interaction.response.send_message(
f"{options[0]} oder {options[1]}?\nHeidi sagt: {random.choice(options)}"
)
# TextGen ----------------------------------------------------------------------------------------
# async def quote_model_autocomplete(interaction: discord.Interaction, current: str) -> list[Choice[str]]:
# models = client.textgen_models.keys()
# return [Choice(name=model, value=model) for model in models]
# @client.tree.command(name="zitat", description="Heidi zitiert!")
# @app_commands.rename(quote_model = "style")
# @app_commands.describe(quote_model = "Woraus soll Heidi zitieren?")
# @app_commands.autocomplete(quote_model = quote_model_autocomplete)
# async def quote(interaction: discord.Interaction, quote_model: str):
# generated_quote = client.textgen_models[quote_model].generate_sentence()
# joined_quote = " ".join(generated_quote)
# await interaction.response.send_message(f"Heidi zitiert: \"{joined_quote}\"")
# @client.tree.command(name="vervollständige", description="Heidi beendet den Satz!")
# @app_commands.rename(prompt = "satzanfang")
# @app_commands.describe(prompt = "Der Satzanfang wird vervollständigt.")
# @app_commands.rename(quote_model = "style")
# @app_commands.describe(quote_model = "Woraus soll Heidi vervollständigen?")
# @app_commands.autocomplete(quote_model = quote_model_autocomplete)
# async def complete(interaction: discord.Interaction, prompt: str, quote_model: str):
# prompt = re.sub(r"[^a-zäöüß'.,]+", " ", prompt.lower()) # only keep valid chars
# generated_quote = client.textgen_models[quote_model].complete_sentence(prompt.split())
# joined_quote = " ".join(generated_quote)
# await interaction.response.send_message(f"Heidi sagt: \"{joined_quote}\"")
# Sounds -----------------------------------------------------------------------------------------
SOUNDDIR: str = "/sounds" if DOCKER else "./heidi-sounds"
async def board_autocomplete(interaction: Interaction, current: str) -> list[Choice[str]]:
"""
Suggest a sound board.
"""
boards: list[str] = os.listdir(SOUNDDIR)
return [Choice(name=board, value=board) for board in boards if board.lower().startswith(current.lower())]
# Example: https://discordpy.readthedocs.io/en/latest/interactions/api.html?highlight=autocomplete#discord.app_commands.autocomplete
async def board_autocomplete(
interaction: discord.Interaction, current: str
) -> List[Choice[str]]:
boards: List[str] = os.listdir(SOUNDDIR)
return [
Choice(name=board, value=board)
for board in boards
if board.lower().startswith(current.lower())
]
async def sound_autocomplete(
interaction: discord.Interaction, current: str
) -> List[Choice[str]]:
async def sound_autocomplete(interaction: Interaction, current: str) -> list[Choice[str]]:
"""
Suggest a sound from an already selected board.
"""
board: str = interaction.namespace.board
sounds: List[str] = list(
map(lambda x: x.split(".")[0], os.listdir(f"{SOUNDDIR}/{board}/"))
)
sounds: list[str] = os.listdir(f"{SOUNDDIR}/{board}/")
return [
Choice(name=sound, value=sound)
for sound in sounds
if sound.lower().startswith(current.lower())
Choice(name=sound.split(".")[0], value=sound) for sound in sounds if sound.lower().startswith(current.lower())
]
@client.tree.command(
name="sag", description="Heidi drückt den Knopf auf dem Soundboard."
)
@client.tree.command(name="sag", description="Heidi drückt den Knopf auf dem Soundboard.")
@app_commands.describe(sound="Was soll Heidi sagen?")
@app_commands.autocomplete(board=board_autocomplete)
@app_commands.autocomplete(sound=sound_autocomplete)
async def say_voiceline(interaction: discord.Interaction, board: str, sound: str):
@enforce_channel(HEIDI_SPAM_ID)
async def say_voiceline(interaction: Interaction, board: str, sound: str) -> None:
"""
Play a voiceline in the calling member's current voice channel.
"""
# Only Members can access voice channels
if not isinstance(interaction.user, discord.Member):
if not isinstance(interaction.user, Member):
print("User not a member")
await interaction.response.send_message("Heidi sagt: Komm in die Gruppe!")
_ = await interaction.response.send_message("Heidi sagt: Komm in die Gruppe!", ephemeral=True)
return
member: discord.Member = interaction.user
member: Member = interaction.user
await play_voice_line_for_member(interaction, member, board, sound)
class InstantButton(ui.Button[ui.View]):
def __init__(self, label: str, board: str, sound: str) -> None:
super().__init__(style=ButtonStyle.red, label=label)
self.board: str = board
self.sound: str = sound
@override
async def callback(self, interaction: Interaction) -> None:
"""
Handle a press of the button.
"""
if not isinstance(interaction.user, Member):
_ = await interaction.response.send_message(
"Heidi mag keine discord.User, nur discord.Member!", ephemeral=True
)
return
await play_voice_line_for_member(interaction, interaction.user, self.board, self.sound)
class InstantButtonsView(ui.View):
def __init__(self, board: str, timeout: float | None = None) -> None:
super().__init__(timeout=timeout)
sounds = os.listdir(f"{SOUNDDIR}/{board}")
for sound in sounds:
_ = self.add_item(InstantButton(sound.split(".")[0], board, sound))
@client.tree.command(name="instantbuttons", description="Heidi malt Knöpfe für Sounds in den Chat.")
@app_commands.describe(board="Welches Soundboard soll knöpfe bekommen?")
@app_commands.autocomplete(board=board_autocomplete)
@enforce_channel(HEIDI_SPAM_ID)
async def soundboard_buttons(interaction: Interaction, board: str) -> None:
_ = await interaction.response.send_message(f"Soundboard: {board.capitalize()}", view=InstantButtonsView(board))
# Contextmenu ------------------------------------------------------------------------------------
# Callable on members
@client.tree.context_menu(name="beleidigen")
async def insult(
interaction: discord.Interaction, member: discord.Member
): # with message: discord.Message this can be called on a message
interaction: Interaction, member: Member
) -> None: # with message: discord.Message this can be called on a message
"""
Send an insult to a member via direct message.
"""
if not member.dm_channel:
await member.create_dm()
_ = await member.create_dm()
if not member.dm_channel:
print("Error creating DMChannel!")
await interaction.response.send_message("Heidi sagt: Gib mal DM Nummer süße*r!")
_ = await interaction.response.send_message("Heidi sagt: Gib mal DM Nummer süße*r!", ephemeral=True)
return
insults = [
@ -532,69 +477,12 @@ async def insult(
"Richtiger Gesichtsgünther ey!",
]
await member.dm_channel.send(random.choice(insults))
await interaction.response.send_message(
"Anzeige ist raus!"
_ = await member.dm_channel.send(random.choice(insults))
_ = await interaction.response.send_message(
"Anzeige ist raus!", ephemeral=True
) # with ephemeral = True only the caller can see the answer
# Helpers ----------------------------------------------------------------------------------------
async def play_voice_line(
interaction: Union[discord.Interaction, None],
voice_channel: discord.VoiceChannel,
board: str,
sound: str,
):
try:
open(f"{SOUNDDIR}/{board}/{sound}.mkv")
except IOError:
print("Error: Invalid soundfile!")
if interaction != None:
await interaction.response.send_message(
f'Heidi sagt: "{board}/{sound}" kanninich finden bruder'
)
return
if interaction != None:
await interaction.response.send_message(f'Heidi sagt: "{board}/{sound}"')
audio_source = discord.FFmpegPCMAudio(
f"{SOUNDDIR}/{board}/{sound}.mkv"
) # only works from docker
voice_client = await voice_channel.connect()
voice_client.play(audio_source)
while voice_client.is_playing():
await asyncio.sleep(1)
await voice_client.disconnect()
async def play_voice_line_for_member(
interaction: Union[discord.Interaction, None],
member: discord.Member,
board: str,
sound: str,
):
# Member needs to be in voice channel to hear audio (Heidi needs to know the channel to join)
if (
member == None
or member.voice == None
or member.voice.channel == None
or not isinstance(member.voice.channel, discord.VoiceChannel)
):
print("User not in (valid) voice channel!")
if interaction != None:
await interaction.response.send_message("Heidi sagt: Komm in den Channel!")
return
voice_channel: discord.VoiceChannel = member.voice.channel
await play_voice_line(interaction, voice_channel, board, sound)
# ------------------------------------------------------------------------------------------------

44
flake.lock generated
View File

@ -2,15 +2,14 @@
"nodes": {
"devshell": {
"inputs": {
"nixpkgs": "nixpkgs",
"systems": "systems"
"nixpkgs": "nixpkgs"
},
"locked": {
"lastModified": 1700815693,
"narHash": "sha256-JtKZEQUzosrCwDsLgm+g6aqbP1aseUl1334OShEAS3s=",
"lastModified": 1741473158,
"narHash": "sha256-kWNaq6wQUbUMlPgw8Y+9/9wP0F8SHkjy24/mN3UAppg=",
"owner": "numtide",
"repo": "devshell",
"rev": "7ad1c417c87e98e56dcef7ecd0e0a2f2e5669d51",
"rev": "7c9e793ebe66bcba8292989a68c0419b737a22a0",
"type": "github"
},
"original": {
@ -21,14 +20,14 @@
},
"flake-utils": {
"inputs": {
"systems": "systems_2"
"systems": "systems"
},
"locked": {
"lastModified": 1694529238,
"narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
@ -39,11 +38,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1677383253,
"narHash": "sha256-UfpzWfSxkfXHnb4boXZNaKsAcUrZT9Hw+tao1oZxd08=",
"lastModified": 1722073938,
"narHash": "sha256-OpX0StkL8vpXyWOGUD6G+MA26wAXK6SpT94kLJXo6B4=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9952d6bc395f5841262b006fbace8dd7e143b634",
"rev": "e36e9f57337d0ff0cf77aceb58af4c805472bfae",
"type": "github"
},
"original": {
@ -55,11 +54,11 @@
},
"nixpkgs_2": {
"locked": {
"lastModified": 1700856099,
"narHash": "sha256-RnEA7iJ36Ay9jI0WwP+/y4zjEhmeN6Cjs9VOFBH7eVQ=",
"lastModified": 1758446476,
"narHash": "sha256-5rdAi7CTvM/kSs6fHe1bREIva5W3TbImsto+dxG4mBo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "0bd59c54ef06bc34eca01e37d689f5e46b3fe2f1",
"rev": "a1f79a1770d05af18111fbbe2a3ab2c42c0f6cd0",
"type": "github"
},
"original": {
@ -90,21 +89,6 @@
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",

flake.nix
View File

@ -5,62 +5,32 @@
inputs.flake-utils.url = "github:numtide/flake-utils";
inputs.devshell.url = "github:numtide/devshell";
outputs = { self, nixpkgs, flake-utils, devshell }:
flake-utils.lib.eachDefaultSystem (system:
let
outputs = {
self,
nixpkgs,
flake-utils,
devshell,
}:
flake-utils.lib.eachDefaultSystem (system: let
pkgs = import nixpkgs {
inherit system;
config.allowUnfree = true;
overlays = [ devshell.overlays.default ];
overlays = [devshell.overlays.default];
};
# TODO: Originally it was nixpkgs.fetchurl but that didn't work, pkgs.fetchurl did...
# Determine the difference between nixpkgs and pkgs
# Taken from: https://github.com/gbtb/nix-stable-diffusion/blob/master/flake.nix
# Overlay: https://nixos.wiki/wiki/Overlays
# FetchURL: https://ryantm.github.io/nixpkgs/builders/fetchers/
torch-rocm = pkgs.hiPrio (pkgs.python310Packages.torch-bin.overrideAttrs (old: {
src = pkgs.fetchurl {
name = "torch-1.12.1+rocm5.1.1-cp310-cp310-linux_x86_64.whl";
url = "https://download.pytorch.org/whl/rocm5.1.1/torch-1.12.1%2Brocm5.1.1-cp310-cp310-linux_x86_64.whl";
hash = "sha256-kNShDx88BZjRQhWgnsaJAT8hXnStVMU1ugPNMEJcgnA=";
};
}));
torchvision-rocm = pkgs.hiPrio (pkgs.python310Packages.torchvision-bin.overrideAttrs (old: {
src = pkgs.fetchurl {
name = "torchvision-0.13.1+rocm5.1.1-cp310-cp310-linux_x86_64.whl";
url = "https://download.pytorch.org/whl/rocm5.1.1/torchvision-0.13.1%2Brocm5.1.1-cp310-cp310-linux_x86_64.whl";
hash = "sha256-mYk4+XNXU6rjpgWfKUDq+5fH/HNPQ5wkEtAgJUDN/Jg=";
};
}));
myPython = pkgs.python311.withPackages (p: with p; [
# Basic
rich
# Discord
discordpy
python = pkgs.python313.withPackages (p:
with p; [
python-dotenv
pynacl
# Scraping
# beautifulsoup4
# requests
# MachineLearning
# torch-rocm
# torchvision-rocm
# numpy
# matplotlib
# nltk
discordpy
pynacl # DiscordPy Voice Support
]);
in {
devShell = pkgs.devshell.mkShell {
name = "HeidiBot";
packages = with pkgs; [
myPython
python
# nodePackages.pyright # LSP
];

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b8cc7d0eafc46aa981616128e1e86d1a27f36054aef4059076716d0480c96f00
size 66590

BIN
heidi-sounds/basic/Suiii.mkv (Stored with Git LFS) Normal file

Binary file not shown.

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:513a8c09c497af17832cf97dd927588ead6eae176e17cdcea07ffb0af517da4e
size 1918439

BIN
heidi-sounds/bg3/Disgusting.mp3 (Stored with Git LFS) Normal file

Binary file not shown.

BIN
heidi-sounds/bg3/Hahaha.mp3 (Stored with Git LFS) Normal file

Binary file not shown.

BIN
heidi-sounds/bg3/Honk.mp3 (Stored with Git LFS) Normal file

Binary file not shown.

BIN
heidi-sounds/bg3/Start talking.mp3 (Stored with Git LFS) Normal file

Binary file not shown.

BIN
heidi-sounds/bg3/This group is full of weirdos.mp3 (Stored with Git LFS) Normal file

Binary file not shown.

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:75cc48c3a5dc85a8304b1194065ca2ab4c6c6b6295f0a55e2889c9925d005e38
size 1194504

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a4cb6bf7b7bfdd050ecf60bf05bc2a6a5ce9c0077e673a4cf97dbfb2ecafb9a1
size 688896

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:81e355325f1e0c54ad187ef6c3873f3dadc19b1dee42b65ee5c210696480e733
size 812079

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:81fb57e99dae06e799be43135acf2eb400096cf6d06fbdbf282b8825e93e3972
size 657071

BIN
heidi-sounds/drache/Hagebuddne.mkv (Stored with Git LFS) Normal file

Binary file not shown.

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0d6d41e2e33cde49b5c4a3e55a52f18506330b172a99ae708a58d60509151d6f
size 1054582

BIN
heidi-sounds/henri/Dusch dich.mkv (Stored with Git LFS) Normal file

Binary file not shown.

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9bbd10f725147df249ec92718b4559bdd8be7eca97e5d976f936b9d825afb5aa
size 1034217

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fb66307f990103230f9c63a27a21b2ebda924135ab330f5b595f5f37639e12b0
size 1144977

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0ede6a0d526f181785322cb87d1a27688a99f6f71fbd37d784e28c6641ebb70c
size 743647

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b0c9e2a5fc2e3529abd0e680b7a139b9239eae27f1e991b39f78187bb1a0fb88
size 939420

BIN
heidi-sounds/henri/Ich bin der Pablo.mkv (Stored with Git LFS) Normal file

Binary file not shown.

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ee3fdbcafcba7e23cc60a45a5b5cc433068d8ca97b0e4d4be668148b9dab167c
size 753935

BIN
heidi-sounds/henri/Kann ich behilflich sein.mkv (Stored with Git LFS) Normal file

Binary file not shown.

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e4f057443b96e7a8e38e5cf7186083e375378f54f2aa314891a64625b9b1ba61
size 1466235

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:be7a6ba0f90793ff595dd56d06196b9029984976a3d61e928cfc7fcec51741e6
size 240103

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0d333644b63155bb732937aadd4147fb67c77d623dc55af862ef5e8218b2ea61
size 591141

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c0d8740ef2a7dfea55bdabf7fb43c78ae63d2998f117bb49371e90c2897cfa65
size 1608532

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1f224f5643e99e4515a1d377295272a3f7e044db20b49ce246b50709ad632ff0
size 928159

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7d17a0f74141389e73a644546d050b57aa7ad7aec2a707f57dfebfed5256d565
size 1899825

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1cd9cf0792ed7bafd38c760606d28ab366ff4fb320e41151161bd95b5ecc6848
size 3682445

BIN
heidi-sounds/henri/Yakari.mkv (Stored with Git LFS) Normal file

Binary file not shown.

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:da44b79bef932c3387962d11534a49d6a53d90574891d7f96fa780a836ea9b71
size 1911644

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:44b776ebde62c761dcd60dbfb9d90eb7b4f76861a06a14876d55f6f41585dbd6
size 2104870

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cfbb90b495f0006302b94d23081959aa90881cd7c8326963d99bd7971b16053d
size 871686

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:269b4b0b2d0e7d6b830de04128eb9dc42a20e27f4ca025e43bde20c3f4b84ff9
size 18344

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1f3f55803a1933ec15d60471cc0f3513d0ba009f1b5d398cca27fcfea5f2fbf4
size 49007

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b9fe1a53ec2f0970c2a3f58e0734bbb5f4515d5be42ea4fb0f1062f1731e3d1e
size 2294454

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:41f86b71a9da251552f9aefd2603b8221c0066ae2a9156ee2642285a0a48c3bf
size 21591

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0e66e31216df98dc166528576f9d69f1a1b64ebb49b3790a56b285fee73b7a2e
size 1512452

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:804d0bba869c6961d6b9d53ad3cdceea3c4b5108b2cb3d10fb8525f9f93738b7
size 3277223

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0976ab71b0e76a100317e6fa98d3cf13d71f082ddaebcf482b0c73d35869add2
size 1827568

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5f3dc5640813d6948acddb0c7ac64af301afb55e7a9cc5b7b350a8a23e7c2328
size 1659634

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:db6440955702bb50f4325b6192335b36ba15d66bfb4d95de96cd2ffa044cb437
size 1373947

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d16638c23a33d57cbd3b039db6361ff1a2c01acd2e6d4b348aece0db9ef0b092
size 2051380

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e645dba27b5c2b63a2647396c8f6af8bf7c84c0c9a01e417fea95098244362a5
size 1342773

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a29dc73ec6a2dd9bf5ad4a0b906c2136601217fbff9659c544b03e5c5e2ba8de
size 1705496

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4fb141ec779838b335ed99fd5efd5a8bee940e38154d29bf8cee96b69176b025
size 658379

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7f149a7235c98c81279bd0c8196563fa4ba3fe35f1fcdb7b7ce7570d45224ea1
size 802502

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:11f2ea7cfcfc2470c62a868d504037eba69746b6eaf9449071ebba49933ede2f
size 1587716

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bb6f7a6126ab28541a38e01a19458a02ae9c3ca66d704931de0373d1a366034e
size 3222195

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cf17f143fefe52227a9f60f2ca8a12f85288c6ccf9136053302392379d0b038f
size 1851666

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f5bb768b34bd64fb1804880c14b86f88a0960d581832f8b293ca191591a4b0f7
size 1381026

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:388600e942c443f545ed62172f1ac238c6a0046a67cb7478e56082902ad0f93a
size 1369564

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:370ce2efbbd1976b4ea6b7949f9b1c973fd4d2a13a291a7cba8458baf545935d
size 855748

BIN
heidi-sounds/tit/Ab in Knast.mkv (Stored with Git LFS) Normal file

Binary file not shown.

BIN
heidi-sounds/tit/Bitconnect.mkv (Stored with Git LFS) Normal file

Binary file not shown.

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ead05e566688064e5339b21de1853dc4ae826bb5a37d071e4f92115ac901e21e
size 1096640

BIN
heidi-sounds/vinz/VINZENT.mkv (Stored with Git LFS) Normal file

Binary file not shown.

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:128296c98fadfcad218b03e4e682acfe37235b1bbfc3c4542db36adec0be443a
size 30877

184
heidi_client.py Normal file
View File

@ -0,0 +1,184 @@
import asyncio
from configparser import ConfigParser
import os
from types import CoroutineType
from typing import Any, Callable, override
from discord import Message, VoiceState
from discord.activity import Activity
from discord.app_commands.tree import CommandTree
from discord.client import Client
from discord.enums import ActivityType, Status, StatusDisplayType
from discord.flags import Intents
from discord.member import Member
from discord.state import VoiceChannel
from heidi_config import USER_CONFIG_SCHEME, ConfigSection, FlagsConfigKey
from heidi_constants import CONFIGPATH, LINUS_GUILD, TEST_GUILD, USERCONFIGNAME
from heidi_helpers import create_author_based_message_predicate, play_voice_line_for_member
class HeidiClient(Client):
def __init__(self, *, intents: Intents):
client_activity: Activity = Activity(
name="GNTM",
url="https://www.joyn.de/serien/germanys-next-topmodel",
type=ActivityType.competing,
state="Nur eine kann es werden!",
# details="Details",
# platform="Platform",
status_display_type=StatusDisplayType.state,
)
super().__init__(
activity=client_activity,
status=Status.online,
intents=intents,
)
# Separate object that keeps all application command state
self.tree: CommandTree = CommandTree(self)
# Handle persistent user configuration
self.user_config: ConfigParser = ConfigParser()
if not os.path.exists(f"{CONFIGPATH}/{USERCONFIGNAME}"):
# Create the file
_ = open(f"{CONFIGPATH}/{USERCONFIGNAME}", "x")
_ = self.user_config.read(f"{CONFIGPATH}/{USERCONFIGNAME}")
self.update_to_default_user_config()
self.print_user_config()
# automatic actions on all messages
# on_message_triggers is a map with tuples of two functions: (predicate, action)
# the predicate receives the message as argument
# if the predicate is true the action is performed
self.on_message_triggers: dict[
Callable[[Message], bool],
Callable[[Message], CoroutineType[Any, Any, None]],
] = {
# lambda m: m.author.nick.lower() in self.models.get_in_names(): self.autoreact_to_girls,
create_author_based_message_predicate(["jeremy"]): self._autoreact_to_jeremy,
create_author_based_message_predicate(["kardashian", "jenner"]): self._autoreact_to_kardashian,
}
# automatic actions on voice state changes
# on_voice_state_triggers is a map with tuples of two functions: (predicate, action)
# the predicate receives the member, before- and after-state as arguments
# if the predicate is true, the action is performed
self.on_voice_state_triggers: dict[
Callable[[Member, VoiceState, VoiceState], bool],
Callable[[Member, VoiceState, VoiceState], CoroutineType[Any, Any, None]],
] = {
lambda member, before, after: before.channel != after.channel
and after.channel is not None
and isinstance(after.channel, VoiceChannel): self._play_entrance_sound,
}
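# Both trigger maps are consumed by the event handlers in bot.py: the handler loops over the
# predicate keys and awaits the mapped action when a predicate matches, roughly
#
#     for predicate in client.on_voice_state_triggers:
#         if predicate(member, before, after):
#             await client.on_voice_state_triggers[predicate](member, before, after)
#
# (on_message walks on_message_triggers the same way, passing only the message).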
# Synchronize commands to guilds
@override
async def setup_hook(self) -> None:
self.tree.copy_global_to(guild=LINUS_GUILD)
_ = await self.tree.sync(guild=LINUS_GUILD)
self.tree.copy_global_to(guild=TEST_GUILD)
_ = await self.tree.sync(guild=TEST_GUILD)
def update_to_default_user_config(self) -> None:
"""
Adds config keys to the config, if they don't exist yet.
This writes the user config file.
"""
for section, keys in USER_CONFIG_SCHEME.items():
if section not in self.user_config:
print(f"Adding section {section} to {CONFIGPATH}/{USERCONFIGNAME}")
self.user_config[section] = dict()
for key, default in keys:
if key not in self.user_config[section].keys():
print(f"Adding key {key} with default value {default} to section {section}")
self.user_config[section][key] = default
self.write_user_config()
def print_user_config(self) -> None:
"""
Print the current user config from memory.
This does not read the user config file.
"""
print("Heidi User Config:\n")
for section in self.user_config.sections():
print(f"[{section}]")
for key in self.user_config[section]:
print(f"{key}={self.user_config[section][key]}")
print("")
def write_user_config(self) -> None:
"""
Write the current configuration to disk.
"""
if not os.path.exists(f"{CONFIGPATH}/{USERCONFIGNAME}"):
print(f"Error: {CONFIGPATH}/{USERCONFIGNAME} doesn't exist!")
return
print(f"Writing {CONFIGPATH}/{USERCONFIGNAME}")
with open(f"{CONFIGPATH}/{USERCONFIGNAME}", "w") as file:
self.user_config.write(file)
# Automatic Actions ------------------------------------------------------------------------------
@staticmethod
async def _autoreact_to_jeremy(message: Message) -> None:
"""
🧀 Jeremy.
This function is set in on_message_triggers and triggered by the on_message event.
"""
await message.add_reaction("🧀")
@staticmethod
async def _autoreact_to_kardashian(message: Message) -> None:
"""
💄 Kardashian.
This function is set in on_message_triggers and triggered by the on_message event.
"""
await message.add_reaction("💄")
async def _play_entrance_sound(
self,
member: Member,
before: VoiceState,
after: VoiceState,
) -> None:
"""
Play a sound when a member joins a voice channel (and another member is present).
This function is set in on_voice_state_triggers and triggered by the on_voice_state_update event.
"""
disable_join_sound_if_alone: bool = self.user_config[ConfigSection.FLAGS.value][
FlagsConfigKey.DISABLE_JOIN_SOUND_IF_ALONE.value
] == str(True)
# Don't play anything when no other users are present
if (
disable_join_sound_if_alone
and member.voice is not None
and member.voice.channel is not None
and len(member.voice.channel.members) <= 1
):
print("Not playing entrance sound, as no other members are present")
return
soundpath: str | None = self.user_config["ENTRANCE.SOUND"].get(member.name, None)
if soundpath is None:
print(f"User {member.name} has not set an entrance sound")
return
board, sound = soundpath.split("/")
# Wait a bit to not have simultaneous joins
await asyncio.sleep(1)
await play_voice_line_for_member(None, member, board, sound)

19
heidi_config.py Normal file
View File

@ -0,0 +1,19 @@
from enum import Enum
class ConfigSection(Enum):
FLAGS = "FLAGS"
ENTRANCE_SOUND = "ENTRANCE.SOUND"
class FlagsConfigKey(Enum):
DISABLE_JOIN_SOUND_IF_ALONE = "disable_join_sound_if_alone"
# NOTE: This is the default configuration scheme
USER_CONFIG_SCHEME: dict[str, list[tuple[str, str]]] = {
ConfigSection.FLAGS.value: [
(FlagsConfigKey.DISABLE_JOIN_SOUND_IF_ALONE.value, str(True)),
],
ConfigSection.ENTRANCE_SOUND.value: [],
}
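# For illustration, a Heidi_User.conf generated from this scheme might look roughly like the
# following, assuming a hypothetical member "alice" who picked henri/Yakari.mkv as entrance
# sound via /heidiconfig:
#
#     [FLAGS]
#     disable_join_sound_if_alone = True
#
#     [ENTRANCE.SOUND]
#     alice = henri/Yakari.mkv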

28
heidi_constants.py Normal file
View File

@ -0,0 +1,28 @@
import os
from discord.object import Object
from dotenv import load_dotenv
# This is run when this file is imported
HAS_VARIABLES: bool = load_dotenv()
print("Debug: Importing heidi_constants.py")
# =========================================================================== #
# =========================================================================== #
DOCKER: bool = os.getenv("DOCKER") == str(True)
# =========================================================================== #
# =========================================================================== #
# Constants
CONFIGPATH: str = "/config" if DOCKER else "."
USERCONFIGNAME: str = "Heidi_User.conf"
SOUNDDIR: str = "/sounds" if DOCKER else "./heidi-sounds"
# IDs of the servers Heidi is used on
LINUS_GUILD: Object = Object(id=431154792308408340)
TEST_GUILD: Object = Object(id=821511861178204161)
# Channel IDs
HEIDI_SPAM_ID: int = 822223476101742682
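# Note: os.getenv("DOCKER") == str(True) matches only the literal string "True", so the
# container environment (for example the .env file that the deleted launch.sh passed via
# --env-file) has to set exactly DOCKER=True; the Discord secrets read through load_dotenv
# come from the same place.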

128
heidi_helpers.py Normal file
View File

@ -0,0 +1,128 @@
import asyncio
import functools
from types import CoroutineType
from typing import Any, Callable
from discord import Interaction, Message, VoiceChannel, Member, VoiceClient
from discord.player import FFmpegPCMAudio
from heidi_constants import SOUNDDIR
print("Debug: Importing heidi_helpers.py")
# Checks -----------------------------------------------------------------------------------------
# 1. @enforce_channel(ID) is added to a function, which evaluates to decorate with the channel_id in its closure
# 2. The function is passed to decorate(function), which returns wrapped: the wrapper checks the interaction's channel before calling the original function
def enforce_channel(channel_id: int):
"""
Only run the decorated function if it was invoked from the specified channel.
"""
def decorate(function: Callable[..., CoroutineType[Any, Any, None]]):
@functools.wraps(function)
async def wrapped(*args: *tuple[Interaction, *tuple[Any, ...]], **kwargs: int):
"""
Sends an interaction response if the interaction is not triggered from the heidi_spam channel.
"""
interaction: Interaction = args[0]
# Do not call the decorated function if the channel_id doesn't match
if not interaction.channel_id == channel_id:
_ = await interaction.response.send_message("Heidi sagt: Geh in heidi_spam du dulli", ephemeral=True)
return
await function(*args, **kwargs)
return wrapped
return decorate
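# Usage sketch (mirroring how bot.py applies it):
#
#     @client.tree.command(name="heidi", description="Heidi!")
#     @enforce_channel(HEIDI_SPAM_ID)
#     async def heidi_exclaim(interaction: Interaction) -> None:
#         ...
#
# The wrapper runs before the command body: if interaction.channel_id != channel_id it answers
# with the ephemeral "Geh in heidi_spam du dulli" hint and returns without calling the command.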
# Reactions --------------------------------------------------------------------------------------
def create_author_based_message_predicate(names: list[str]) -> Callable[[Message], bool]:
"""
Create a predicate that checks whether the message author's nickname contains one of the given names.
For usage with on_message_triggers.
"""
def handler(message: Message) -> bool:
for name in names:
if (
isinstance(message.author, Member)
and message.author.nick is not None
and name.lower() in message.author.nick.lower()
):
return True
return False
return handler
# Sounds -----------------------------------------------------------------------------------------
async def play_voice_line(
interaction: Interaction | None,
voice_channel: VoiceChannel,
board: str,
sound: str,
) -> None:
"""
Play a voice line in the specified channel.
"""
try:
# Check if the file exists
_ = open(f"{SOUNDDIR}/{board}/{sound}")
except IOError:
print(f"Error: Invalid soundfile {SOUNDDIR}/{board}/{sound}!")
if interaction is not None:
_ = await interaction.response.send_message(
f'Heidi sagt: "{board}/{sound}" kanninich finden bruder', ephemeral=True
)
return
if interaction is not None:
_ = await interaction.response.send_message(f'Heidi sagt: "{board}/{sound}"', ephemeral=True)
# TODO: Normalize volume when playing
audio_source: FFmpegPCMAudio = FFmpegPCMAudio(f"{SOUNDDIR}/{board}/{sound}") # only works from docker
voice_client: VoiceClient = await voice_channel.connect()
voice_client.play(audio_source)
while voice_client.is_playing():
await asyncio.sleep(1)
await voice_client.disconnect()
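# Hedged sketch for the volume TODO above (an assumption, not the current behaviour): FFmpegPCMAudio
# accepts an `options` string that is passed to ffmpeg after the input, so the loudnorm filter could
# be used to even out clip loudness:
#
# audio_source = FFmpegPCMAudio(f"{SOUNDDIR}/{board}/{sound}", options="-filter:a loudnorm")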
async def play_voice_line_for_member(
interaction: Interaction | None,
member: Member,
board: str,
sound: str,
) -> None:
"""
Play a voice line in the member's current channel.
"""
# Member needs to be in voice channel to hear audio (Heidi needs to know the channel to join)
if (
member is None
or member.voice is None
or member.voice.channel is None
or not isinstance(member.voice.channel, VoiceChannel)
):
print("User not in (valid) voice channel!")
if interaction is not None:
_ = await interaction.response.send_message("Heidi sagt: Komm in den Channel!", ephemeral=True)
return
voice_channel: VoiceChannel = member.voice.channel
await play_voice_line(interaction, voice_channel, board, sound)
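# Hedged usage sketch (illustrative): a slash command built on the helpers above. `bot` and the
# "heidi"/"ja.mp3" board/sound names are assumptions, not values from this repo.
#
# @bot.tree.command(name="sag_ja", guild=LINUS_GUILD)
# @enforce_channel(HEIDI_SPAM_ID)
# async def sag_ja(interaction: Interaction) -> None:
#     if not isinstance(interaction.user, Member):
#         await interaction.response.send_message("Heidi sagt: Nur auf Servern!", ephemeral=True)
#         return
#     await play_voice_line_for_member(interaction, interaction.user, "heidi", "ja.mp3")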

View File

@ -1,9 +0,0 @@
#!/bin/sh
cd /home/christoph/HeidiBot
git pull
docker pull registry.gitlab.com/churl/heidibot
docker container rm -f heidibot
docker run -d --env-file /home/christoph/HeidiBot/.env --mount src=/home/christoph/HeidiBot/voicelines,target=/sounds,type=bind --name heidibot registry.gitlab.com/churl/heidibot
docker image prune -f

View File

@ -1,33 +0,0 @@
#!/usr/bin/env python3
import requests
import re
from bs4 import BeautifulSoup
class Models:
def __init__(self):
url_girls = "https://www.prosieben.de/tv/germanys-next-topmodel/models"
html_girls = requests.get(url_girls)
soup_girls = BeautifulSoup(html_girls.text, "html.parser")
girls_in = soup_girls.findAll("a", class_="candidate-in")
girls_out = soup_girls.findAll("a", class_="candidate-out")
self.girls_in = {girl.get("title").lower(): girl for girl in girls_in}
self.girls_out = {girl.get("title").lower(): girl for girl in girls_out}
self.girls = {**self.girls_in, **self.girls_out}
def get_in_names(self):
return self.girls_in.keys()
def get_out_names(self):
return self.girls_out.keys()
def get_image(self, name):
style = self.girls[name.lower()].find("figure", class_="teaser-img").get("style")
url = re.search(r"url\(.*\);", style).group()
return url[4:-9] + "562x996" # increase resolution

2
pyproject.toml Normal file
View File

@ -0,0 +1,2 @@
[tool.black]
line-length = 120

View File

@ -3,12 +3,3 @@ rich
discord.py # maintained again
pynacl # voice support
python-dotenv # discord guild secrets
# Webscraping
# requests
# beautifulsoup4
# Textgeneration
# torch
# numpy
# nltk

2
setup.cfg Normal file
View File

@ -0,0 +1,2 @@
[flake8]
max-line-length = 120

View File

@ -1,44 +0,0 @@
#!/usr/bin/env python3
from rich.traceback import install
install()
from abc import ABC, abstractmethod
# Abstract classes are generally not required in Python, but I wanted the interface to be explicit
class textgen(ABC):
@abstractmethod
def init(self, filename):
"""
filename - The file (same directory as textgen.py) that contains the training text
"""
raise NotImplementedError("Can't use abstract class")
@abstractmethod
def load(self):
"""
Load the trained markov chain from a precomputed file
"""
raise NotImplementedError("Can't use abstract class")
@abstractmethod
def train(self):
"""
Generate the markov chain, uses prefix length defined in init()
"""
raise NotImplementedError("Can't use abstract class")
@abstractmethod
def generate_sentence(self):
"""
Generate a series of words/characters until a . is generated
"""
raise NotImplementedError("Can't use abstract class")
@abstractmethod
def complete_sentence(self, prefix):
"""
Generate the rest of a sentence for a given beginning
"""
raise NotImplementedError("Can't use abstract class")

View File

@ -1,303 +0,0 @@
#!/usr/bin/env python3
import re, random
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from textgen import textgen
from torch import nn, optim
from rich.traceback import install
install()
# Model =======================================================================================
# https://towardsdatascience.com/text-generation-with-bi-lstm-in-pytorch-5fda6e7cc22c
# Embedding -> Bi-LSTM -> LSTM -> Linear
class Model(nn.ModuleList):
def __init__(self, args, device):
super(Model, self).__init__()
self.device = device
self.batch_size = args["batch_size"]
self.hidden_dim = args["hidden_dim"]
self.input_size = args["vocab_size"]
self.num_classes = args["vocab_size"]
self.sequence_len = args["window"]
# Dropout
self.dropout = nn.Dropout(0.25) # Don't need to set device for the layers as we transfer the whole model later
# Embedding layer
self.embedding = nn.Embedding(self.input_size, self.hidden_dim, padding_idx=0)
# Bi-LSTM
# Forward and backward
self.lstm_cell_forward = nn.LSTMCell(self.hidden_dim, self.hidden_dim)
self.lstm_cell_backward = nn.LSTMCell(self.hidden_dim, self.hidden_dim)
# LSTM layer
self.lstm_cell = nn.LSTMCell(self.hidden_dim * 2, self.hidden_dim * 2)
# Linear layer
self.linear = nn.Linear(self.hidden_dim * 2, self.num_classes)
def forward(self, x):
# Bi-LSTM
# hs = [batch_size x hidden_size]
# cs = [batch_size x hidden_size]
hs_forward = torch.zeros(x.size(0), self.hidden_dim).to(self.device) # Need to specify device here as this is not part of the model directly
cs_forward = torch.zeros(x.size(0), self.hidden_dim).to(self.device)
hs_backward = torch.zeros(x.size(0), self.hidden_dim).to(self.device)
cs_backward = torch.zeros(x.size(0), self.hidden_dim).to(self.device)
# LSTM
# hs = [batch_size x (hidden_size * 2)]
# cs = [batch_size x (hidden_size * 2)]
hs_lstm = torch.zeros(x.size(0), self.hidden_dim * 2).to(self.device)
cs_lstm = torch.zeros(x.size(0), self.hidden_dim * 2).to(self.device)
# Weights initialization
torch.nn.init.kaiming_normal_(hs_forward)
torch.nn.init.kaiming_normal_(cs_forward)
torch.nn.init.kaiming_normal_(hs_backward)
torch.nn.init.kaiming_normal_(cs_backward)
torch.nn.init.kaiming_normal_(hs_lstm)
torch.nn.init.kaiming_normal_(cs_lstm)
# From idx to embedding
out = self.embedding(x)
# Prepare the shape for LSTM Cells
out = out.view(self.sequence_len, x.size(0), -1)
forward = []
backward = []
# Unfolding Bi-LSTM
# Forward
for i in range(self.sequence_len):
hs_forward, cs_forward = self.lstm_cell_forward(out[i], (hs_forward, cs_forward))
forward.append(hs_forward)
# Backward
for i in reversed(range(self.sequence_len)):
hs_backward, cs_backward = self.lstm_cell_backward(out[i], (hs_backward, cs_backward))
backward.append(hs_backward)
# LSTM
for fwd, bwd in zip(forward, backward):
input_tensor = torch.cat((fwd, bwd), 1)
hs_lstm, cs_lstm = self.lstm_cell(input_tensor, (hs_lstm, cs_lstm))
# Last hidden state is passed through a linear layer
out = self.linear(hs_lstm)
return out
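# Hedged shape example (illustrative; the window value is arbitrary here, vocab_size 31 matches the
# letters list in LSTMTextGenerator.init below):
#
# args = {"window": 8, "hidden_dim": 128, "vocab_size": 31, "batch_size": 128}
# model = Model(args, torch.device("cpu"))
# x = torch.randint(0, 31, (128, 8))   # [batch_size x window] character indices
# logits = model(x)                    # [batch_size x vocab_size] scores for the next character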
# =============================================================================================
class LSTMTextGenerator(textgen):
def __init__(self, windowsize):
self.windowsize = windowsize # We slide a window over the character sequence and look at the next letter,
# similar to the Markov chain order
def init(self, filename):
self.filename = filename
# Use this to generate one hot vector and filter characters
self.letters = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
"n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "ä", "ö", "ü", ".", " "]
with open(f"./textfiles/{filename}.txt", "r") as file:
lines = [line.lower() for line in file.readlines()] # lowercase list
text = " ".join(lines) # single string
self.charbase = [char for char in text if char in self.letters] # list of characters
# Select device
if torch.cuda.is_available():
dev = "cuda:0"
print("Selected GPU for LSTM")
else:
dev = "cpu"
print("Selected CPU for LSTM")
self.device = torch.device(dev)
# Init model
self.args = {
"window": self.windowsize,
"hidden_dim": 128,
"vocab_size": len(self.letters),
"batch_size": 128,
"learning_rate": 0.0005,
"num_epochs": 100
}
self.model = Model(self.args, self.device)
self.model.to(self.device) # All model layers need to use the correct tensors (cpu/gpu)
# Needed for both training and generation
self.__generate_char_sequences()
# Helper shit
def __char_to_idx(self, char):
return self.letters.index(char)
def __idx_to_char(self, idx):
return self.letters[idx]
def __generate_char_sequences(self):
# Example
# [[21, 20, 15],
# [12, 12, 14]]
prefixes = []
# Example
# [[1],
# [4]]
suffixes = []
print("Generating LSTM char sequences...")
for i in range(len(self.charbase) - self.windowsize - 1):
prefixes.append([self.__char_to_idx(char) for char in self.charbase[i:i+self.windowsize]])
suffixes += [self.__char_to_idx(char) for char in self.charbase[i+self.windowsize+1]] # Bit stupid wrapping this in a list but removes possible type error
# Enter numpy territory NOW
self.prefixes = np.array(prefixes)
self.suffixes = np.array(suffixes)
print(f"Prefixes shape: {self.prefixes.shape}")
print(f"Suffixes shape: {self.suffixes.shape}")
print("Completed.")
# Interface shit
# TODO: Also save/load generated prefixes
def load(self):
print(f"Loading \"{self.filename}\" LSTM model with {len(self.charbase)} characters from file.")
self.model.load_state_dict(torch.load(f"weights/{self.filename}_lstm_model.pt"))
def train(self):
print(f"Training \"{self.filename}\" LSTM model with {len(self.charbase)} characters.")
# Optimizer initialization, RMSprop for RNN
optimizer = optim.RMSprop(self.model.parameters(), lr=self.args["learning_rate"])
# Defining number of batches
num_batches = int(len(self.prefixes) / self.args["batch_size"])
# Set model in training mode
self.model.train()
losses = []
# Training phase
for epoch in range(self.args["num_epochs"]):
# Mini batches
for i in range(num_batches):
# Batch definition
try:
x_batch = self.prefixes[i * self.args["batch_size"]:(i + 1) * self.args["batch_size"]]
y_batch = self.suffixes[i * self.args["batch_size"]:(i + 1) * self.args["batch_size"]]
except IndexError:
x_batch = self.prefixes[i * self.args["batch_size"]:]
y_batch = self.suffixes[i * self.args["batch_size"]:]
# Convert numpy array into torch tensors
x = torch.from_numpy(x_batch).type(torch.long).to(self.device)
y = torch.from_numpy(y_batch).type(torch.long).to(self.device)
# Feed the model
y_pred = self.model(x)
# Loss calculation
loss = F.cross_entropy(y_pred, y.squeeze()).to(self.device)
losses += [loss.item()]
# Clean gradients
optimizer.zero_grad()
# Calculate gradients
loss.backward()
# Update parameters
optimizer.step()
print("Epoch: %d , loss: %.5f " % (epoch, loss.item()))
torch.save(self.model.state_dict(), f"weights/{self.filename}_lstm_model.pt")
print(f"Saved \"{self.filename}\" LSTM model to file")
plt.plot(np.arange(0, len(losses)), losses)
plt.title(self.filename)
plt.show()
def generate_sentence(self):
# Randomly select a starting index from the set of sequences
start = np.random.randint(0, len(self.prefixes)-1)
# Convert back to string to match complete_sentence
pattern = "".join([self.__idx_to_char(char) for char in self.prefixes[start]]) # random sequence from the training text
return self.complete_sentence(pattern)
def complete_sentence(self, prefix):
print("Prefix:", prefix)
# Convert to indexes np.array
pattern = np.array([self.__char_to_idx(char) for char in prefix])
# Set the model to evaluation mode
self.model.eval()
# Define the softmax function
softmax = nn.Softmax(dim=1).to(self.device)
# In full_prediction we will save the complete prediction
full_prediction = pattern.copy()
print("Generating sentence...")
# Predict the next characters one by one, appending them to the starting pattern until '.' is reached (max 500 iterations)
for _ in range(500):
# the numpy pattern is transformed into a tensor and reshaped
pattern = torch.from_numpy(pattern).type(torch.long).to(self.device)
pattern = pattern.view(1,-1)
# make a prediction given the pattern
prediction = self.model(pattern)
# apply the softmax function to the predicted tensor
prediction = softmax(prediction)
# the prediction tensor is transformed into a numpy array
prediction = prediction.squeeze().detach().cpu().numpy()
# take the idx with the highest probability
arg_max = np.argmax(prediction)
# transform the current pattern tensor back into a numpy array
pattern = pattern.squeeze().detach().cpu().numpy()
# slide the window 1 character to the right
pattern = pattern[1:]
# the new pattern is the "old" pattern plus the predicted character
pattern = np.append(pattern, arg_max)
# the full prediction is saved
full_prediction = np.append(full_prediction, arg_max)
# Stop on . character
if self.__idx_to_char(arg_max) == ".":
break
full_prediction = "".join([self.__idx_to_char(value) for value in full_prediction])
print("Generated:", full_prediction)
return full_prediction

View File

@ -1,82 +0,0 @@
#!/usr/bin/env python3
import re
import random
from textgen import textgen
from rich.traceback import install
install()
# NOTE: This is word based, not character based
# TODO: Serialize and save/load model (don't train on the server)
# TODO: Maybe extract sentence beginnings and use them as starters?
class MarkovTextGenerator(textgen):
# The greater the order (prefix length), the less variation in the generated text, but generally the better the sentences.
# If the prefix length is high there are less options to choose from, so the sentences are very close to the training text.
def __init__(self, order): # Set order here for better interface (only needed for markov model)
self.order = order
def init(self, filename): # Filename is needed for every type of model so it's part of the interface
with open(f"./textfiles/{filename}.txt", "r") as file:
# Remove all characters except a-zäöüß'.,
self.wordbase = re.sub(r"[^a-zäöüß'.,]+", " ", file.read().lower()).split()
self.word_table = dict()
def load(self):
print(f"Loaded Markov chain of order {self.order} with {len(self.wordbase)} words from file.")
def train(self):
print(f"Training Markov chain of order {self.order} with {len(self.wordbase)} words.")
# init the frequencies
for i in range(len(self.wordbase) - self.order - 1): # Look at every word in range
prefix = tuple(self.wordbase[i:i+self.order]) # Look at the next self.order words from current position
suffix = self.wordbase[i+self.order] # The next word is the suffix
if prefix not in self.word_table: # New option wooo
self.word_table[prefix] = []
# if suffix not in self.word_table[prefix]:  # disabled for probabilities: if a suffix appears multiple times in the list it is more likely to be chosen
self.word_table[prefix].append(suffix)
print(f"Generated suffixes for {len(self.word_table)} prefixes.")
# def generate_random(self, n):
# fword = random.choice(list(self.word_table.keys())) # Random first word
# output = [*fword]
# for _ in range(self.order, n):
# output.append(self.generate_word_by_word(tuple(output[-self.order :])))
# return output
def generate_suffix_for_prefix(self, prefix: tuple):
if len(prefix) > self.order: # In this case we look at the last self.order elements of prefix
prefix = prefix[-self.order:]
if prefix not in self.word_table: # In this case we need to choose a possible suffix from the last word in the prefix (if prefix too short for example)
print(f"Prefix {prefix} not in table")
for key in self.word_table.keys():
if key[-1] == prefix[-1]:
return random.choice(self.word_table[key])
return random.choice(self.word_table[prefix])
def generate_sentence(self):
fword = random.choice(list(self.word_table.keys()))
output = [*fword]
while "." not in output[-1]:
output.append(self.generate_suffix_for_prefix(tuple(output[-self.order:])))
return output
def complete_sentence(self, prefix):
output = [*prefix]
while "." not in output[-1]:
output.append(self.generate_suffix_for_prefix(tuple(output[-self.order:])))
return output
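# Hedged usage sketch (assumes a ./textfiles/heidi.txt training corpus, which is not part of this diff):
#
# generator = MarkovTextGenerator(order=2)
# generator.init("heidi")
# generator.train()
# print(" ".join(generator.generate_sentence()))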