Skip to content

Commit 99fc34e

Browse files
authored
Merge pull request #259 from UWCS/summarise
summarise cooldown
2 parents fd19057 + bc6acca commit 99fc34e

File tree

3 files changed

+24
-8
lines changed

3 files changed

+24
-8
lines changed

cogs/commands/summarise.py

+18-8
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,8 @@
1919
mentions = AllowedMentions(everyone=False, users=False, roles=False, replied_user=True)
2020
model = "gpt-4o-mini"
2121

22+
23+
2224
def clean(msg, *prefixes):
2325
for pre in prefixes:
2426
msg = msg.strip().removeprefix(pre)
@@ -29,22 +31,30 @@ class Summarise(commands.Cog):
2931
def __init__(self, bot: Bot):
    """Set up the cog: keep a handle on the bot and point openai at our key."""
    # Configure the shared openai client before any summarise call happens.
    openai.api_key = CONFIG.OPENAI_API_KEY
    self.bot = bot
32-
self.system_prompt = "People yap too much, I don't want to read all of it. In 200 words or less give me the gist of what is being said. Note that the messages are in reverse chronological order:"
3334

35+
def build_prompt(self, bullet_points, channel_name):
    """Build the system prompt sent to the model.

    bullet_points: truthy -> ask the model to format the summary as bullets.
    channel_name: interpolated into the prompt as a topic hint.
    Returns the prompt string (trailing newline included).
    """
    formatting_hint = "Put it in bullet points for readability." if bullet_points else ""
    # Adjacent literals keep the long instruction readable; the resulting
    # text is identical to a single f-string version.
    return (
        "People yap too much, I don't want to read all of it. "
        f"The topic is something related to {channel_name}. "
        "In 2 sentences or less give me the gist of what is being said. "
        f"{formatting_hint} Note that the messages are in reverse chronological order:\n"
    )
41+
42+
@commands.cooldown(CONFIG.SUMMARISE_LIMIT, CONFIG.SUMMARISE_COOLDOWN * 60, commands.BucketType.channel)
3443
@commands.hybrid_command(help=LONG_HELP_TEXT, brief=SHORT_HELP_TEXT)
3544
async def tldr(
36-
self, ctx: Context, number_of_messages: int = 100):
45+
self, ctx: Context, number_of_messages: int = 100, bullet_point_output: bool = False ):
3746
number_of_messages = 400 if number_of_messages > 400 else number_of_messages
38-
47+
48+
3949
# avoid banned users
4050
if not await is_author_banned_openai(ctx):
4151
await ctx.send("You are banned from OpenAI!")
4252
return
4353

4454
# get the last "number_of_messages" messages from the current channel and build the prompt
45-
curr_channel = ctx.guild.get_channel(ctx.channel.id)
46-
messages = curr_channel.history(limit=number_of_messages)
47-
messages = await self.create_message(messages)
55+
prompt = self.build_prompt(bullet_point_output, ctx.channel)
56+
messages = ctx.channel.history(limit=number_of_messages)
57+
messages = await self.create_message(messages, prompt)
4858

4959
# send the prompt to the ai overlords to process
5060
async with ctx.typing():
@@ -68,9 +78,9 @@ async def dispatch_api(self, messages) -> Optional[str]:
6878
reply = clean(reply, "Apollo: ", "apollo: ", name)
6979
return reply
7080

71-
async def create_message(self, message_chain):
81+
async def create_message(self, message_chain, prompt):
7282
# get initial prompt
73-
initial = self.system_prompt + "\n"
83+
initial = prompt + "\n"
7484

7585
# for each message, append it to the prompt as follows --- author : message \n
7686
async for msg in message_chain:

config.example.yaml

+4
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,10 @@ config:
3535
portainer_api_key: portainer
3636
# Liege Chancellor User ID
3737
liege_chancellor_id: 1234
38+
# Summarise Use Limit
39+
summarise_limit: 3
40+
# Summarise Cooldown Period (minutes)
41+
summarise_cooldown: 10
3842
# whether to load general.txt and markov chains
3943
markov_enabled: False
4044

config/config.py

+2
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,8 @@ def __init__(self, filepath: str):
2727
self.AI_SYSTEM_PROMPT: str = parsed.get("ai_system_prompt")
2828
self.PORTAINER_API_KEY: str = parsed.get("portainer_api_key")
2929
self.LIEGE_CHANCELLOR_ID: int = parsed.get("liege_chancellor_id")
30+
self.SUMMARISE_LIMIT: int = parsed.get("summarise_limit")
31+
self.SUMMARISE_COOLDOWN: int = parsed.get("summarise_cooldown")
3032
self.MARKOV_ENABLED: bool = parsed.get("markov_enabled")
3133

3234
# Configuration

0 commit comments

Comments
 (0)