Help Needed: Fixing Conversation between Chatbots
I am currently working on a project that involves creating a conversation between three chatbots using OpenAI’s GPT-3.5 Turbo model. I have encountered a problem where Model 2, which is supposed to respond to Model 1’s question, is receiving the “ask a question” command instead.
Here is the code I have so far:
from collections import deque
import openai
import pyttsx3
import re
# Set OpenAI API key
openai.api_key = "your_openai_api_key"
# Maximum number of characters in memory
MAX_MEMORY_CHARACTERS = 15000
# Maximum number of messages in memory
MAX_MEMORY_MESSAGES = 100
# Class to store a limited amount of messages with a limited number of characters
class LimitedMemoryDeque(deque):
def __init__(self, maxlen, max_characters):
super().__init__(maxlen=maxlen)
self.max_characters = max_characters
self.total_characters = 0
def append(self, item):
item["content"] = item["content"].replace("As an AI language model, ", "")
super().append(item)
self.total_characters += len(item["content"])
while self.total_characters > self.max_characters:
removed_item = self.popleft()
self.total_characters -= len(removed_item["content"])
# Function to generate a response using the given memory, messages, and model information
def generate_response(memory, user_message, previous_message, model_info):
conversation = [{"role": "system", "content": f"I am {model_info['name']}, Model {model_info['number']}. My task is to {model_info['role']} and address other models."}] + list(memory) + [user_message, previous_message]
response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=conversation)
response_content = response['choices'][0]['message']['content']
response_content = re.sub(r'^.*?:\s*', '', response_content)
return response_content
# Function to convert text to speech
def speak(text, voice_id):
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[voice_id].id)
engine.say(text)
engine.runAndWait()
# Main function
def main():
memories = LimitedMemoryDeque(maxlen=MAX_MEMORY_MESSAGES, max_characters=MAX_MEMORY_CHARACTERS)
model_infos = [
{"number": 1, "name": "John", "role": "ask questions on the subject", "voice_id": 0},
{"number": 2, "name": "Sarah", "role": "answer John's questions.", "voice_id": 1},
{"number": 3, "name": "Michael", "role": "analyze Sarah's response and give another point of view on the subject.", "voice_id": 2},
]
subject = input("Enter a conversational subject for the chatbots to talk about: ")
john_message, sarah_message, michael_message = {"content": f"ask a question on the subject {subject}."}, {}, {}
while True:
responses = []
# Generate a response for each model
for model_info in model_infos:
user_message = {"role": "user", "content": f"{model_info['name']}, please {model_info['role'].split(' ')[0]}: {john_message.get('content', '')}{sarah_message.get('content', '')}{michael_message.get('content', '')}"}
previous_message = {"role": "assistant", "content": ""}
model_response = generate_response(memories, user_message, previous_message, model_info)
memories.append({"role": "assistant", "content": f"Model {model_info['number']}: {model_response}"})
responses.append(model_response)
john_message, sarah_message, michael_message = sarah_message, michael_message, john_message
for model_number, response in enumerate(responses):
print(f"{model_infos[model_number]['name']}: {response}")
speak(response, model_infos[model_number]['voice_id'])
if __name__ == "__main__":
main()
I would greatly appreciate any help in figuring out how to fix this issue so that Model 2 can correctly respond to Model 1’s question. Thank you in advance!