diff --git a/README.md b/README.md index 086e6c53..61713404 100644 --- a/README.md +++ b/README.md @@ -249,8 +249,9 @@ To discover how to create profiles for large-scale users, as well as how to visu ### Latest Updates -📢 Add a new feature to customize each agent's models, tools, and prompts, and refactor the interface to follow the PettingZoo style. - 📆 May 22, 2025 +📢 Support Interview Action for asking agents specific questions and getting answers. - 📆 June 2, 2025 +- Support customization of each agent's models, tools, and prompts; refactor the interface to follow the PettingZoo style. - 📆 May 22, 2025 - Refactor into the OASIS environment, publish camel-oasis on PyPI, and release the documentation. - 📆 April 24, 2025 - Support OPENAI Embedding model for Twhin-Bert Recommendation System. - 📆 March 25, 2025 - Updated social media links and QR codes in the README! Join OASIS & CAMEL on WeChat, X, Reddit, and Discord. - 📆 March 24, 2025 diff --git a/assets/wechatgroup.png b/assets/wechatgroup.png index 8e37d311..6a27870d 100644 Binary files a/assets/wechatgroup.png and b/assets/wechatgroup.png differ diff --git a/docs/cookbooks/twitter_interview.mdx b/docs/cookbooks/twitter_interview.mdx new file mode 100644 index 00000000..81b76c29 --- /dev/null +++ b/docs/cookbooks/twitter_interview.mdx @@ -0,0 +1,326 @@ +--- +title: 'Interview' +description: 'Learn how to conduct interviews with AI agents in Twitter simulations using the INTERVIEW action type' +--- + +# Interview + +This cookbook demonstrates how to use the INTERVIEW action type to conduct interviews with AI agents in a Twitter simulation. The interview functionality allows you to ask specific questions to agents and collect their responses, which is useful for research, opinion polling, and understanding agent behaviors. + +## Overview + +The INTERVIEW action type enables you to: +- Ask specific questions to individual agents +- Collect structured responses from agents +- Store interview data in the database for analysis +- Conduct interviews alongside regular social media interactions + +## Key Features + +- **Manual Interview Actions**: Use `ManualAction` with `ActionType.INTERVIEW` to conduct interviews +- **Automatic Response Collection**: The system automatically collects and stores agent responses +- **Database Storage**: All interview data is stored in the trace table for later analysis +- **Concurrent Execution**: Interviews can be conducted alongside other social media actions + +## Important Note + +**Do NOT include `ActionType.INTERVIEW` in the `available_actions` list** when creating your agent graph. The interview action is designed to be used only manually by researchers/developers, not automatically selected by LLM agents. Including it in `available_actions` would allow agents to interview each other automatically, which is typically not desired behavior. + +## Complete Example + +```python +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +import asyncio +import os +import sqlite3 +import json + +from camel.models import ModelFactory +from camel.types import ModelPlatformType, ModelType + +import oasis +from oasis import (ActionType, LLMAction, ManualAction, + generate_twitter_agent_graph) + + +async def main(): + openai_model = ModelFactory.create( + model_platform=ModelPlatformType.OPENAI, + model_type=ModelType.GPT_4O_MINI, + ) + + # Define the available actions for the agents + # Note: INTERVIEW is NOT included here to prevent LLM from automatically selecting it + # INTERVIEW can still be used manually via ManualAction + available_actions = [ + ActionType.CREATE_POST, + ActionType.LIKE_POST, + ActionType.REPOST, + ActionType.FOLLOW, + ActionType.DO_NOTHING, + ActionType.QUOTE_POST, + # ActionType.INTERVIEW, # DO NOT include this - interviews should be manual only + ] + + agent_graph = await generate_twitter_agent_graph( + profile_path=("data/twitter_dataset/anonymous_topic_200_1h/" + "False_Business_0.csv"), + model=openai_model, + available_actions=available_actions, + ) + + # Define the path to the database + db_path = "./data/twitter_simulation.db" + + # Delete the old database + if os.path.exists(db_path): + os.remove(db_path) + + # Make the environment + env = oasis.make( + agent_graph=agent_graph, + platform=oasis.DefaultPlatformType.TWITTER, + database_path=db_path, + ) + + # Run the environment + await env.reset() + + # First timestep: Agent 0 creates a post + actions_1 = {} + actions_1[env.agent_graph.get_agent(0)] = ManualAction( + action_type=ActionType.CREATE_POST, + action_args={"content": "Earth is flat."}) + await env.step(actions_1) + + # Second timestep: Let some agents respond with LLM actions + actions_2 = { + agent: LLMAction() + # Activate 5 agents with id 1, 3, 5, 7, 9 + for _, agent in env.agent_graph.get_agents([1, 3, 5, 7, 9]) + } + await env.step(actions_2) + + # Third timestep: Agent 1 creates a post, and we interview Agent 0 + actions_3 = {} + actions_3[env.agent_graph.get_agent(1)] = ManualAction( + action_type=ActionType.CREATE_POST, + action_args={"content": "Earth is not flat."}) + + # Create an interview action to ask Agent 0 about their views + actions_3[env.agent_graph.get_agent(0)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={"prompt": "What do you think about the shape of the Earth? 
Please explain your reasoning."}) + + await env.step(actions_3) + + # Fourth timestep: Let some other agents respond + actions_4 = { + agent: LLMAction() + for _, agent in env.agent_graph.get_agents([2, 4, 6, 8, 10]) + } + await env.step(actions_4) + + # Fifth timestep: Interview multiple agents + actions_5 = {} + actions_5[env.agent_graph.get_agent(1)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={"prompt": "Why do you believe the Earth is not flat?"}) + + actions_5[env.agent_graph.get_agent(2)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={"prompt": "What are your thoughts on the debate about Earth's shape?"}) + + await env.step(actions_5) + + # Sixth timestep: Final LLM actions for remaining agents + actions_6 = { + agent: LLMAction() + for _, agent in env.agent_graph.get_agents([3, 5, 7, 9]) + } + await env.step(actions_6) + + # Close the environment + await env.close() + + # visualize the interview results + print("\n=== Interview Results ===") + conn = sqlite3.connect(db_path) + cursor = conn.cursor() + # Here we query all interview records from the database + # We use ActionType.INTERVIEW.value as the query condition to get all interview records + # Each record contains user ID, interview information (in JSON format), and creation timestamp + cursor.execute(""" + SELECT user_id, info, created_at + FROM trace + WHERE action = ? + """, (ActionType.INTERVIEW.value,)) + + # This query retrieves all interview records from the trace table + # - user_id: the ID of the agent who was interviewed + # - info: JSON string containing interview details (prompt, response, etc.) + # - created_at: timestamp when the interview was conducted + # We'll parse this data below to display the interview results + for user_id, info_json, timestamp in cursor.fetchall(): + info = json.loads(info_json) + print(f"\nAgent {user_id} (Timestep {timestamp}):") + print(f"Prompt: {info.get('prompt', 'N/A')}") + print(f"Interview ID: {info.get('interview_id', 'N/A')}") + print(f"Response: {info.get('response', 'N/A')}") + + conn.close() + + +if __name__ == "__main__": + asyncio.run(main()) +``` + +## How It Works + +### 1. Setup and Configuration + +**Important**: Do NOT include `ActionType.INTERVIEW` in your available actions list. Interviews should only be conducted manually: + +```python +# Correct configuration - INTERVIEW is NOT included +available_actions = [ + ActionType.CREATE_POST, + ActionType.LIKE_POST, + ActionType.REPOST, + ActionType.FOLLOW, + ActionType.DO_NOTHING, + ActionType.QUOTE_POST, + # ActionType.INTERVIEW, # DO NOT include - interviews are manual only +] +``` + +This prevents LLM agents from automatically selecting the interview action during their decision-making process. Interviews can still be conducted using `ManualAction`. + +### 2. Conducting Interviews + +Use `ManualAction` with `ActionType.INTERVIEW` to conduct interviews: + +```python +# Single interview +interview_action = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={"prompt": "What are your thoughts on climate change?"}) + +actions = {env.agent_graph.get_agent(0): interview_action} +await env.step(actions) +``` + +### 3. 
Multiple Interviews in One Step + +You can interview multiple agents simultaneously: + +```python +actions = {} +actions[env.agent_graph.get_agent(1)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={"prompt": "Why do you believe the Earth is not flat?"}) + +actions[env.agent_graph.get_agent(2)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={"prompt": "What are your thoughts on the debate about Earth's shape?"}) + +await env.step(actions) +``` + +### 4. Mixing Interviews with Other Actions + +Interviews can be conducted alongside regular social media actions: + +```python +actions = {} +# Regular post creation +actions[env.agent_graph.get_agent(1)] = ManualAction( + action_type=ActionType.CREATE_POST, + action_args={"content": "Earth is not flat."}) + +# Interview action +actions[env.agent_graph.get_agent(0)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={"prompt": "What do you think about the shape of the Earth?"}) + +await env.step(actions) +``` + +## Data Storage and Retrieval + +### Database Schema + +Interview data is stored in the `trace` table with the following structure: +- `user_id`: The ID of the interviewed agent +- `action`: Set to `ActionType.INTERVIEW.value` +- `info`: JSON string containing interview details +- `created_at`: Timestamp of the interview + +### Retrieving Interview Results + +```python +import sqlite3 +import json + +conn = sqlite3.connect(db_path) +cursor = conn.cursor() + +# Query all interview records +cursor.execute(""" + SELECT user_id, info, created_at + FROM trace + WHERE action = ? +""", (ActionType.INTERVIEW.value,)) + +for user_id, info_json, timestamp in cursor.fetchall(): + info = json.loads(info_json) + print(f"Agent {user_id}: {info.get('response', 'N/A')}") + +conn.close() +``` + +### Interview Data Structure + +Each interview record contains: +- `prompt`: The question asked to the agent +- `interview_id`: Unique identifier for the interview +- `response`: The agent's response to the question + +## Best Practices + +### 1. Strategic Interview Timing + +Conduct interviews at strategic points in your simulation: +- After controversial posts to gauge reactions +- Before and after significant events +- At regular intervals to track opinion changes + +### 2. Question Design + +Design effective interview questions: +- Be specific and clear +- Avoid leading questions +- Ask open-ended questions for richer responses + +```python +# Good examples +"What are your thoughts on renewable energy?" +"How do you feel about the recent policy changes?" +"Can you explain your reasoning behind your last post?" + +# Avoid +"Don't you think renewable energy is great?" # Leading +"Yes or no: Do you like cats?" 
# Too restrictive +``` diff --git a/docs/docs.json b/docs/docs.json index 92c743ca..ee4c4d83 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -46,7 +46,8 @@ "cookbooks/reddit_simulation", "cookbooks/sympy_tools_simulation", "cookbooks/search_tools_simulation", - "cookbooks/custom_prompt_simulation" + "cookbooks/custom_prompt_simulation", + "cookbooks/twitter_interview" ] }, { diff --git a/docs/key_modules/actions.mdx b/docs/key_modules/actions.mdx index a8d0b9de..27945117 100644 --- a/docs/key_modules/actions.mdx +++ b/docs/key_modules/actions.mdx @@ -107,6 +107,7 @@ OASIS provides a comprehensive set of actions that simulate real social media be | `REFRESH` | Refresh the timeline to get recommended posts | | `DO_NOTHING` | Perform no action (pass the turn) | | `PURCHASE_PRODUCT` | Purchase a product (for e-commerce simulations) | +| `INTERVIEW` | Interview a user and record the interview result in the database | ## Arguments for `ManualAction` @@ -286,3 +287,11 @@ action = ManualAction( args={"product_name": "Premium Subscription", "purchase_num": 1} ) ``` + +#### INTERVIEW +```python +action = ManualAction( + action=ActionType.INTERVIEW, + args={"prompt": "What is your name?"} +) +``` diff --git a/docs/key_modules/social_agent.mdx b/docs/key_modules/social_agent.mdx index 99200d40..e049bdab 100644 --- a/docs/key_modules/social_agent.mdx +++ b/docs/key_modules/social_agent.mdx @@ -19,6 +19,7 @@ When initializing a `SocialAgent`, you can configure the following core paramete | `available_actions` | [`list[ActionType]`](https://docs.oasis.camel-ai.org/key_modules/actions) or `None` | ✗ | `None` | List of allowed actions in the social platform. For more details, see [Actions Module](https://docs.oasis.camel-ai.org/key_modules/actions). If `None`, all actions are enabled by default. | | `tools` | `List[Union[FunctionTool, Callable]]` or `None` | ✗ | `None` | External tools the agent can use, such as a `get_weather` function, a `Toolkit`, or an `MCPToolkit` from [CAMEL](https://docs.camel-ai.org/key_modules/tools.html). If set to `None`, the agent will not be able to use any external tools. | | `single_iteration` | `bool` | ✗ | `True` | Whether the agent performs only a single round of reasoning when taking an LLM action. If `False`, the agent may continue acting based on the outcome of previous actions or tool calls. | +| `interview_record` | `bool` | ✗ | `False` | Whether to record the interview prompt and result in the agent's memory. | For more details on the `model` and the `tools` parameter, see [Models Module](https://docs.oasis.camel-ai.org/key_modules/models) and [Toolkits Module](https://docs.oasis.camel-ai.org/key_modules/toolkits). 
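For context on the new `interview_record` flag documented above, here is a minimal sketch of how it would be passed when constructing a `SocialAgent` directly. This is an illustration only (the agent id, name, and channel setup are made up); the constructor signature follows the `oasis/social_agent/agent.py` changes later in this diff:

```python
from oasis.social_agent.agent import SocialAgent
from oasis.social_platform.channel import Channel
from oasis.social_platform.config import UserInfo

# With interview_record=True, the interview prompt and the agent's answer
# are also written into the agent's memory, so later actions can condition
# on earlier interviews. The default (False) keeps interviews invisible to
# the agent's own history.
agent = SocialAgent(
    agent_id=0,
    user_info=UserInfo(name="demo_user"),  # illustrative profile
    channel=Channel(),
    interview_record=True,
)
```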
diff --git a/examples/experiment/reddit_emall_demo/emall_simulation.py b/examples/experiment/reddit_emall_demo/emall_simulation.py index f0048215..f726e9f4 100644 --- a/examples/experiment/reddit_emall_demo/emall_simulation.py +++ b/examples/experiment/reddit_emall_demo/emall_simulation.py @@ -137,7 +137,7 @@ async def running( ) agent_graph = await generate_reddit_agents( agent_info_path=user_path, - twitter_channel=twitter_channel, + channel=twitter_channel, agent_graph=agent_graph, agent_user_id_mapping=id_mapping, follow_post_agent=follow_post_agent, diff --git a/examples/experiment/reddit_gpt_example/reddit_simulation_gpt.py b/examples/experiment/reddit_gpt_example/reddit_simulation_gpt.py index 201970d9..e3a9146a 100644 --- a/examples/experiment/reddit_gpt_example/reddit_simulation_gpt.py +++ b/examples/experiment/reddit_gpt_example/reddit_simulation_gpt.py @@ -136,7 +136,7 @@ async def running( ) agent_graph = await generate_reddit_agents( agent_info_path=user_path, - twitter_channel=twitter_channel, + channel=twitter_channel, agent_graph=agent_graph, agent_user_id_mapping=id_mapping, follow_post_agent=follow_post_agent, diff --git a/examples/experiment/reddit_simulation_align_with_human/reddit_simulation_align_with_human.py b/examples/experiment/reddit_simulation_align_with_human/reddit_simulation_align_with_human.py index a4e7988d..999fbf26 100644 --- a/examples/experiment/reddit_simulation_align_with_human/reddit_simulation_align_with_human.py +++ b/examples/experiment/reddit_simulation_align_with_human/reddit_simulation_align_with_human.py @@ -139,7 +139,7 @@ async def running( ) agent_graph = await generate_reddit_agents( agent_info_path=user_path, - twitter_channel=twitter_channel, + channel=twitter_channel, agent_graph=agent_graph, agent_user_id_mapping=id_mapping, follow_post_agent=follow_post_agent, diff --git a/examples/experiment/reddit_simulation_counterfactual/reddit_simulation_counterfactual.py b/examples/experiment/reddit_simulation_counterfactual/reddit_simulation_counterfactual.py index f42f6191..fc97fa16 100644 --- a/examples/experiment/reddit_simulation_counterfactual/reddit_simulation_counterfactual.py +++ b/examples/experiment/reddit_simulation_counterfactual/reddit_simulation_counterfactual.py @@ -137,7 +137,7 @@ async def running( ) agent_graph = await generate_reddit_agents( agent_info_path=user_path, - twitter_channel=twitter_channel, + channel=twitter_channel, agent_graph=agent_graph, agent_user_id_mapping=id_mapping, follow_post_agent=follow_post_agent, diff --git a/examples/experiment/twitter_gpt_example/twitter_simulation.py b/examples/experiment/twitter_gpt_example/twitter_simulation.py index e514bdde..acc07f09 100644 --- a/examples/experiment/twitter_gpt_example/twitter_simulation.py +++ b/examples/experiment/twitter_gpt_example/twitter_simulation.py @@ -127,7 +127,7 @@ async def running( start_hour = 13 agent_graph = await generate_agents(agent_info_path=csv_path, - twitter_channel=twitter_channel, + channel=twitter_channel, start_time=start_time, model=model, recsys_type=recsys_type, diff --git a/examples/experiment/twitter_gpt_example_openai_embedding/twitter_simulation.py b/examples/experiment/twitter_gpt_example_openai_embedding/twitter_simulation.py index 4a9e5f5f..461a5cda 100644 --- a/examples/experiment/twitter_gpt_example_openai_embedding/twitter_simulation.py +++ b/examples/experiment/twitter_gpt_example_openai_embedding/twitter_simulation.py @@ -129,7 +129,7 @@ async def running( start_hour = 13 agent_graph = await 
generate_agents(agent_info_path=csv_path, - twitter_channel=twitter_channel, + channel=twitter_channel, start_time=start_time, model=model, recsys_type=recsys_type, diff --git a/examples/experiment/twitter_simulation/align_with_real_world/twitter_simulation_large.py b/examples/experiment/twitter_simulation/align_with_real_world/twitter_simulation_large.py index 59d1ae16..2e2841a4 100644 --- a/examples/experiment/twitter_simulation/align_with_real_world/twitter_simulation_large.py +++ b/examples/experiment/twitter_simulation/align_with_real_world/twitter_simulation_large.py @@ -135,7 +135,7 @@ async def running( model_configs = model_configs or {} agent_graph = await generate_agents(agent_info_path=csv_path, - twitter_channel=twitter_channel, + channel=twitter_channel, start_time=start_time, model=models, recsys_type=recsys_type, diff --git a/examples/experiment/twitter_simulation/group_polarization/twitter_simulation_group_polar.py b/examples/experiment/twitter_simulation/group_polarization/twitter_simulation_group_polar.py index 225a9c8e..7a3e1578 100644 --- a/examples/experiment/twitter_simulation/group_polarization/twitter_simulation_group_polar.py +++ b/examples/experiment/twitter_simulation/group_polarization/twitter_simulation_group_polar.py @@ -135,7 +135,7 @@ async def running( model_configs = model_configs or {} agent_graph = await generate_agents_100w( agent_info_path=csv_path, - twitter_channel=twitter_channel, + channel=twitter_channel, start_time=start_time, recsys_type=recsys_type, twitter=infra, diff --git a/examples/experiment/twitter_simulation_1M_agents/twitter_simulation_1m.py b/examples/experiment/twitter_simulation_1M_agents/twitter_simulation_1m.py index b211a929..0aaee990 100644 --- a/examples/experiment/twitter_simulation_1M_agents/twitter_simulation_1m.py +++ b/examples/experiment/twitter_simulation_1M_agents/twitter_simulation_1m.py @@ -126,7 +126,7 @@ async def running( agent_graph = await generate_agents_100w( agent_info_path=csv_path, - twitter_channel=twitter_channel, + channel=twitter_channel, start_time=start_time, recsys_type=recsys_type, twitter=infra, diff --git a/examples/twitter_interview.py b/examples/twitter_interview.py new file mode 100644 index 00000000..d2a3c8c2 --- /dev/null +++ b/examples/twitter_interview.py @@ -0,0 +1,166 @@ +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved.
=========== +import asyncio +import json +import os +import sqlite3 + +from camel.models import ModelFactory +from camel.types import ModelPlatformType, ModelType + +import oasis +from oasis import (ActionType, LLMAction, ManualAction, + generate_twitter_agent_graph) + + +async def main(): + openai_model = ModelFactory.create( + model_platform=ModelPlatformType.OPENAI, + model_type=ModelType.GPT_4O_MINI, + ) + + # Define the available actions for the agents + # Note: INTERVIEW is NOT included here to + # prevent LLM from automatically selecting it + # INTERVIEW can still be used manually via ManualAction + available_actions = [ + ActionType.CREATE_POST, + ActionType.LIKE_POST, + ActionType.REPOST, + ActionType.FOLLOW, + ActionType.DO_NOTHING, + ActionType.QUOTE_POST, + ] + + agent_graph = await generate_twitter_agent_graph( + profile_path=("data/twitter_dataset/anonymous_topic_200_1h/" + "False_Business_0.csv"), + model=openai_model, + available_actions=available_actions, + ) + + # Define the path to the database + db_path = "./data/twitter_simulation.db" + + # Delete the old database + if os.path.exists(db_path): + os.remove(db_path) + + # Make the environment + env = oasis.make( + agent_graph=agent_graph, + platform=oasis.DefaultPlatformType.TWITTER, + database_path=db_path, + ) + + # Run the environment + await env.reset() + + # First timestep: Agent 0 creates a post + actions_1 = {} + actions_1[env.agent_graph.get_agent(0)] = ManualAction( + action_type=ActionType.CREATE_POST, + action_args={"content": "Earth is flat."}) + await env.step(actions_1) + + # Second timestep: Let some agents respond with LLM actions + actions_2 = { + agent: LLMAction() + # Activate 5 agents with id 1, 3, 5, 7, 9 + for _, agent in env.agent_graph.get_agents([1, 3, 5, 7, 9]) + } + await env.step(actions_2) + + # Third timestep: Agent 1 creates a post, and we interview Agent 0 + actions_3 = {} + actions_3[env.agent_graph.get_agent(1)] = ManualAction( + action_type=ActionType.CREATE_POST, + action_args={"content": "Earth is not flat."}) + + # Create an interview action to ask Agent 0 about their views + # ActionType.INTERVIEW is an external action, + # which cannot be executed by agents themselves + actions_3[env.agent_graph.get_agent(0)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={ + "prompt": "What do you think about the shape of the Earth?" + }) + + await env.step(actions_3) + + # Fourth timestep: Let some other agents respond + actions_4 = { + agent: LLMAction() + for _, agent in env.agent_graph.get_agents([2, 4, 6, 8, 10]) + } + await env.step(actions_4) + + # Fifth timestep: Interview multiple agents + actions_5 = {} + actions_5[env.agent_graph.get_agent(1)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={"prompt": "Why do you believe the Earth is not flat?"}) + + actions_5[env.agent_graph.get_agent(2)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={ + "prompt": + "What are your thoughts on the debate about Earth's shape?"
+ }) + + await env.step(actions_5) + + # Sixth timestep: Final LLM actions for remaining agents + actions_6 = { + agent: LLMAction() + for _, agent in env.agent_graph.get_agents([3, 5, 7, 9]) + } + await env.step(actions_6) + + # Close the environment + await env.close() + + # Visualize the interview results + print("\n=== Interview Results ===") + conn = sqlite3.connect(db_path) + cursor = conn.cursor() + # Here we query all interview records from the database + # We use ActionType.INTERVIEW.value as the query condition + # to get all interview records + # Each record contains user ID, interview information + # (in JSON format), and creation timestamp + cursor.execute( + """ + SELECT user_id, info, created_at + FROM trace + WHERE action = ? + """, (ActionType.INTERVIEW.value, )) + + # This query retrieves all interview records from the trace table + # - user_id: the ID of the agent who was interviewed + # - info: JSON string containing interview details (prompt, response, etc.) + # - created_at: timestamp when the interview was conducted + # We'll parse this data below to display the interview results + for user_id, info_json, timestamp in cursor.fetchall(): + info = json.loads(info_json) + print(f"\nAgent {user_id} (Timestep {timestamp}):") + print(f"Prompt: {info.get('prompt', 'N/A')}") + print(f"Interview ID: {info.get('interview_id', 'N/A')}") + print(f"Response: {info.get('response', 'N/A')}") + + conn.close() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/oasis/environment/env.py b/oasis/environment/env.py index e64caeeb..4d815c60 100644 --- a/oasis/environment/env.py +++ b/oasis/environment/env.py @@ -127,6 +127,12 @@ async def _perform_llm_action(self, agent): async with self.llm_semaphore: return await agent.perform_action_by_llm() + async def _perform_interview_action(self, agent, interview_prompt: str): + r"""Send the request to the LLM and execute the interview.
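+ Acquires the shared LLM semaphore, so concurrent interviews respect the same rate limit as regular LLM actions.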
+ """ + async with self.llm_semaphore: + return await agent.perform_interview(interview_prompt) + async def step( self, actions: dict[SocialAgent, Union[ManualAction, LLMAction, List[Union[ManualAction, @@ -152,17 +158,34 @@ async def step( if isinstance(action, list): for single_action in action: if isinstance(single_action, ManualAction): - tasks.append( - agent.perform_action_by_data( - single_action.action_type, - **single_action.action_args)) + if single_action.action_type == ActionType.INTERVIEW: + # Use the agent's perform_interview method for + # interview actions + interview_prompt = single_action.action_args.get( + "prompt", "") + tasks.append( + self._perform_interview_action( + agent, interview_prompt)) + else: + tasks.append( + agent.perform_action_by_data( + single_action.action_type, + **single_action.action_args)) elif isinstance(single_action, LLMAction): tasks.append(self._perform_llm_action(agent)) else: if isinstance(action, ManualAction): - tasks.append( - agent.perform_action_by_data(action.action_type, - **action.action_args)) + if action.action_type == ActionType.INTERVIEW: + # Use the agent's perform_interview method for + # interview actions + interview_prompt = action.action_args.get("prompt", "") + tasks.append( + self._perform_interview_action( + agent, interview_prompt)) + else: + tasks.append( + agent.perform_action_by_data( + action.action_type, **action.action_args)) elif isinstance(action, LLMAction): tasks.append(self._perform_llm_action(agent)) diff --git a/oasis/social_agent/agent.py b/oasis/social_agent/agent.py index f9fb8830..4fe76679 100644 --- a/oasis/social_agent/agent.py +++ b/oasis/social_agent/agent.py @@ -59,18 +59,18 @@ def __init__(self, agent_id: int, user_info: UserInfo, user_info_template: TextPrompt | None = None, - twitter_channel: Channel | None = None, + channel: Channel | None = None, model: Optional[Union[BaseModelBackend, List[BaseModelBackend]]] = None, agent_graph: "AgentGraph" = None, available_actions: list[ActionType] = None, tools: Optional[List[Union[FunctionTool, Callable]]] = None, - single_iteration: bool = True): + single_iteration: bool = True, + interview_record: bool = False): self.social_agent_id = agent_id self.user_info = user_info - self.twitter_channel = twitter_channel or Channel() - self.env = SocialEnvironment( - SocialAction(agent_id, self.twitter_channel)) + self.channel = channel or Channel() + self.env = SocialEnvironment(SocialAction(agent_id, self.channel)) if user_info_template is None: system_message_content = self.user_info.to_system_message() else: @@ -107,6 +107,7 @@ def __init__(self, scheduling_strategy='random_model', tools=all_tools, single_iteration=single_iteration) + self.interview_record = interview_record self.agent_graph = agent_graph self.test_prompt = ( "\n" @@ -152,8 +153,9 @@ async def perform_action_by_llm(self): async def perform_test(self): """ - doing test for all agents. + doing group polarization test for all agents. TODO: rewrite the function according to the ChatAgent. + TODO: unify the test and interview function. """ # user conduct test to agent _ = BaseMessage.make_user_message(role_name="User", @@ -178,8 +180,8 @@ async def perform_test(self): # NOTE: this is a temporary solution. # Camel can not stop updating the agents' memory after stop and astep # now. 
- response = self._get_model_response(openai_messages=openai_messages, - num_tokens=num_tokens) + response = await self._aget_model_response( + openai_messages=openai_messages, num_tokens=num_tokens) content = response.output_messages[0].content agent_log.info( f"Agent {self.social_agent_id} receive response: {content}") @@ -189,6 +191,60 @@ "content": content } + + async def perform_interview(self, interview_prompt: str): + """ + Perform an interview with the agent. + """ + # Construct the user message that frames the interview + user_msg = BaseMessage.make_user_message( + role_name="User", content=("You are a twitter user.")) + + if self.interview_record: + # If interview_record is enabled, write this message to the agent's memory. + self.update_memory(message=user_msg, role=OpenAIBackendRole.SYSTEM) + + openai_messages, num_tokens = self.memory.get_context() + + openai_messages = ([{ + "role": + self.system_message.role_name, + "content": + self.system_message.content.split("# RESPONSE FORMAT")[0], + }] + openai_messages + [{ + "role": "user", + "content": interview_prompt + }]) + + agent_log.info(f"Agent {self.social_agent_id}: {openai_messages}") + # NOTE: this is a temporary solution. + # Camel cannot stop updating the agents' memory after stop and astep + # now. + + response = await self._aget_model_response( + openai_messages=openai_messages, num_tokens=num_tokens) + + content = response.output_messages[0].content + + if self.interview_record: + # If interview_record is enabled, write the interview response to the agent's memory. + self.update_memory(message=response.output_messages[0], + role=OpenAIBackendRole.USER) + agent_log.info( + f"Agent {self.social_agent_id} receive response: {content}") + + # Record the complete interview (prompt + response) through the channel + interview_data = {"prompt": interview_prompt, "response": content} + result = await self.env.action.perform_action( + interview_data, ActionType.INTERVIEW.value) + + # Return the combined result + return { + "user_id": self.social_agent_id, + "prompt": openai_messages, + "content": content, + "success": result.get("success", False) + } + async def perform_action_by_hci(self) -> Any: print("Please choose one function to perform:") function_list = self.env.action.get_openai_function_list() diff --git a/oasis/social_agent/agent_action.py b/oasis/social_agent/agent_action.py index e9fcb1e6..528388e3 100644 --- a/oasis/social_agent/agent_action.py +++ b/oasis/social_agent/agent_action.py @@ -50,6 +50,7 @@ def get_openai_function_list(self) -> list[FunctionTool]: self.mute, self.unmute, self.purchase_product, + self.interview, ] ] @@ -642,3 +643,25 @@ async def purchase_product(self, product_name: str, purchase_num: int): purchase_message = (product_name, purchase_num) return await self.perform_action(purchase_message, ActionType.PURCHASE_PRODUCT.value) + + async def interview(self, prompt: str): + r"""Interview an agent with the given prompt. + + This method invokes an asynchronous action to interview an agent with a + specific question. Upon successful execution, + it returns a dictionary containing a success status + and an interview_id for tracking. + + Args: + prompt (str): The interview question or prompt to ask the agent. + + Returns: + dict: A dictionary containing success status and an interview_id.
+ + Example of a successful return: + { + "success": True, + "interview_id": "1621234567_0" # Timestamp_UserID format + } + """ + return await self.perform_action(prompt, ActionType.INTERVIEW.value) diff --git a/oasis/social_agent/agents_generator.py b/oasis/social_agent/agents_generator.py index 51a591b1..a8591ae3 100644 --- a/oasis/social_agent/agents_generator.py +++ b/oasis/social_agent/agents_generator.py @@ -33,7 +33,7 @@ async def generate_agents( agent_info_path: str, - twitter_channel: Channel, + channel: Channel, model: Union[BaseModelBackend, List[BaseModelBackend]], start_time, recsys_type: str = "twitter", @@ -93,7 +93,7 @@ class instances. agent = SocialAgent( agent_id=agent_id, user_info=user_info, - twitter_channel=twitter_channel, + channel=channel, model=model, agent_graph=agent_graph, available_actions=available_actions, @@ -178,7 +178,7 @@ class instances. async def generate_agents_100w( agent_info_path: str, - twitter_channel: Channel, + channel: Channel, start_time, model: Union[BaseModelBackend, List[BaseModelBackend]], recsys_type: str = "twitter", @@ -248,7 +248,7 @@ class instances. agent = SocialAgent( agent_id=agent_id, user_info=user_info, - twitter_channel=twitter_channel, + channel=channel, model=model, agent_graph=agent_graph, available_actions=available_actions, @@ -362,7 +362,7 @@ async def generate_controllable_agents( # All controllable agent_ids come before the LLM agents' agent_ids agent = SocialAgent(agent_id=i, user_info=user_info, - twitter_channel=channel, + channel=channel, agent_graph=agent_graph) # Add agent to the agent graph agent_graph.add_agent(agent) @@ -389,7 +389,7 @@ async def generate_controllable_agents( async def gen_control_agents_with_data( channel: Channel, control_user_num: int, - models: list[BaseModelBackend], + models: list[BaseModelBackend] | None = None, ) -> tuple[AgentGraph, dict]: agent_graph = AgentGraph() agent_user_id_mapping = {} @@ -411,7 +411,7 @@ async def gen_control_agents_with_data( agent = SocialAgent( agent_id=i, user_info=user_info, - twitter_channel=channel, + channel=channel, agent_graph=agent_graph, model=models, available_actions=None, @@ -430,7 +430,7 @@ async def gen_control_agents_with_data( async def generate_reddit_agents( agent_info_path: str, - twitter_channel: Channel, + channel: Channel, agent_graph: AgentGraph | None = None, agent_user_id_mapping: dict[int, int] | None = None, follow_post_agent: bool = False, @@ -472,7 +472,7 @@ async def process_agent(i): agent = SocialAgent( agent_id=i + control_user_num, user_info=user_info, - twitter_channel=twitter_channel, + channel=channel, agent_graph=agent_graph, model=model, available_actions=available_actions, @@ -538,7 +538,7 @@ def connect_platform_channel( agent_graph: AgentGraph | None = None, ) -> AgentGraph: for _, agent in agent_graph.get_agents(): - agent.twitter_channel = channel + agent.channel = channel agent.env.action.channel = channel return agent_graph diff --git a/oasis/social_platform/platform.py b/oasis/social_platform/platform.py index 72e6ee50..b62908ef 100644 --- a/oasis/social_platform/platform.py +++ b/oasis/social_platform/platform.py @@ -1340,3 +1340,49 @@ async def do_nothing(self, agent_id: int): return {"success": True} except Exception as e: return {"success": False, "error": str(e)} + + async def interview(self, agent_id: int, interview_data): + """Interview an agent with the given prompt and record the response. + + Args: + agent_id (int): The ID of the agent being interviewed.
+ interview_data: Either a string (prompt only) or dict with prompt + and response. + + Returns: + dict: A dictionary with success status. + """ + if self.recsys_type == RecsysType.REDDIT: + current_time = self.sandbox_clock.time_transfer( + datetime.now(), self.start_time) + else: + current_time = self.sandbox_clock.get_time_step() + try: + user_id = agent_id + + # Handle both old format (string prompt) and new format + # (dict with prompt + response) + if isinstance(interview_data, str): + # Old format: just the prompt + prompt = interview_data + response = None + interview_id = f"{current_time}_{user_id}" + action_info = {"prompt": prompt, "interview_id": interview_id} + else: + # New format: dict with prompt and response + prompt = interview_data.get("prompt", "") + response = interview_data.get("response", "") + interview_id = f"{current_time}_{user_id}" + action_info = { + "prompt": prompt, + "response": response, + "interview_id": interview_id + } + + # Record the interview in the trace table + self.pl_utils._record_trace(user_id, ActionType.INTERVIEW.value, + action_info, current_time) + + return {"success": True, "interview_id": interview_id} + except Exception as e: + return {"success": False, "error": str(e)} diff --git a/oasis/social_platform/typing.py b/oasis/social_platform/typing.py index cb68be08..62b0c859 100644 --- a/oasis/social_platform/typing.py +++ b/oasis/social_platform/typing.py @@ -40,6 +40,7 @@ class ActionType(Enum): UNDO_DISLIKE_COMMENT = "undo_dislike_comment" DO_NOTHING = "do_nothing" PURCHASE_PRODUCT = "purchase_product" + INTERVIEW = "interview" class RecsysType(Enum): diff --git a/test/agent/test_agent_custom_prompt.py b/test/agent/test_agent_custom_prompt.py index f8cf18f9..85d35a2d 100644 --- a/test/agent/test_agent_custom_prompt.py +++ b/test/agent/test_agent_custom_prompt.py @@ -47,7 +47,7 @@ async def test_agents_profile(): agent = SocialAgent(agent_id=0, user_info=user_info, user_info_template=user_info_template, - twitter_channel=channel) + channel=channel) assert agent.system_message.content == ( 'Your aim is: Persuade people to buy a product. 
Your task is: ' 'Using roleplay to tell some story about the product.') @@ -84,7 +84,7 @@ async def test_agents_posting(setup_platform): agent = SocialAgent(agent_id=0, user_info=user_info, user_info_template=user_info_template, - twitter_channel=channel) + channel=channel) await agent.env.action.sign_up("user0", "User0", "A bio.") # create post diff --git a/test/agent/test_agent_generator.py b/test/agent/test_agent_generator.py index a0f2d9be..e3152c24 100644 --- a/test/agent/test_agent_generator.py +++ b/test/agent/test_agent_generator.py @@ -42,7 +42,7 @@ async def running(): task = asyncio.create_task(infra.running()) os.environ["SANDBOX_TIME"] = "0" agent_graph = await generate_agents(agent_info_path=agent_info_path, - twitter_channel=twitter_channel, + channel=twitter_channel, model=model, twitter=infra, start_time=0) diff --git a/test/agent/test_agent_graph.py b/test/agent/test_agent_graph.py index f1401ab1..a6bf1ad6 100644 --- a/test/agent/test_agent_graph.py +++ b/test/agent/test_agent_graph.py @@ -35,17 +35,17 @@ def test_agent_graph(tmp_path): agent_0 = SocialAgent( agent_id=0, user_info=UserInfo(name="0"), - twitter_channel=twitter_channel, + channel=twitter_channel, ) agent_1 = SocialAgent( agent_id=1, user_info=UserInfo(name="1"), - twitter_channel=twitter_channel, + channel=twitter_channel, ) agent_2 = SocialAgent( agent_id=2, user_info=UserInfo(name="2"), - twitter_channel=twitter_channel, + channel=twitter_channel, ) graph.add_agent(agent_0) diff --git a/test/agent/test_agent_tools.py b/test/agent/test_agent_tools.py index 44d5fc3e..25b95681 100644 --- a/test/agent/test_agent_tools.py +++ b/test/agent/test_agent_tools.py @@ -64,7 +64,7 @@ async def test_agents_posting(setup_platform): profile=profile) agent = SocialAgent(agent_id=0, user_info=user_info, - twitter_channel=channel, + channel=channel, tools=MathToolkit().get_tools(), available_actions=[ActionType.CREATE_POST], single_iteration=False) diff --git a/test/agent/test_interview_action.py b/test/agent/test_interview_action.py new file mode 100644 index 00000000..e83f0491 --- /dev/null +++ b/test/agent/test_interview_action.py @@ -0,0 +1,472 @@ +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved.
=========== +import asyncio +import json +import os +import os.path as osp +import sqlite3 +import tempfile + +import pytest +from camel.models import ModelFactory +from camel.types import ModelPlatformType, ModelType + +import oasis +from oasis import ActionType, ManualAction, generate_twitter_agent_graph +from oasis.social_agent.agent import SocialAgent +from oasis.social_platform.channel import Channel +from oasis.social_platform.config import UserInfo +from oasis.social_platform.platform import Platform + +parent_folder = osp.dirname(osp.abspath(__file__)) +test_db_filepath = osp.join(parent_folder, "test_interview.db") + + +@pytest.fixture +def setup_interview_test(): + """Setup fixture for interview tests.""" + if os.path.exists(test_db_filepath): + os.remove(test_db_filepath) + yield + # Cleanup after test + if os.path.exists(test_db_filepath): + os.remove(test_db_filepath) + + +@pytest.mark.asyncio +async def test_single_interview_action(setup_interview_test): + """Test conducting a single interview with an agent.""" + agents = [] + channel = Channel() + infra = Platform(test_db_filepath, channel) + task = asyncio.create_task(infra.running()) + + try: + # Create and sign up a test agent + real_name = "TestAgent" + description = "A test agent for interview testing." + profile = { + "nodes": [], + "edges": [], + "other_info": { + "user_profile": + "I am a test agent with strong opinions on technology.", + "mbti": "INTJ", + "activity_level": ["online"] * 24, + "activity_level_frequency": [5] * 24, + "active_threshold": [0.5] * 24, + }, + } + user_info = UserInfo(name=real_name, + description=description, + profile=profile) + agent = SocialAgent(agent_id=0, user_info=user_info, channel=channel) + + # Sign up the agent + return_message = await agent.env.action.sign_up( + "testuser", "TestUser", "A test bio.") + assert return_message["success"] is True + agents.append(agent) + + # Conduct an interview + interview_prompt = "What are your thoughts on artificial intelligence?" + return_message = await agent.env.action.interview(interview_prompt) + assert return_message["success"] is True + assert "interview_id" in return_message + + # Verify the interview was recorded in the database + conn = sqlite3.connect(test_db_filepath) + cursor = conn.cursor() + cursor.execute( + """ + SELECT user_id, info, action + FROM trace + WHERE action = ? AND user_id = ? + """, (ActionType.INTERVIEW.value, 0)) + + interview_records = cursor.fetchall() + assert len(interview_records) == 1 + + user_id, info_json, action = interview_records[0] + assert user_id == 0 + assert action == ActionType.INTERVIEW.value + + info = json.loads(info_json) + assert info["prompt"] == interview_prompt + assert "interview_id" in info + + conn.close() + + finally: + await channel.write_to_receive_queue((None, None, ActionType.EXIT)) + await task + + +@pytest.mark.asyncio +async def test_multiple_interviews_action(setup_interview_test): + """Test conducting multiple interviews with different agents.""" + agents = [] + channel = Channel() + infra = Platform(test_db_filepath, channel) + task = asyncio.create_task(infra.running()) + + try: + # Create and sign up multiple test agents + for i in range(3): + real_name = f"TestAgent{i}" + description = f"Test agent {i} for interview testing." 
+ profile = { + "nodes": [], + "edges": [], + "other_info": { + "user_profile": + f"I am test agent {i} with unique perspectives.", + "mbti": "INTJ", + "activity_level": ["online"] * 24, + "activity_level_frequency": [5] * 24, + "active_threshold": [0.5] * 24, + }, + } + user_info = UserInfo(name=real_name, + description=description, + profile=profile) + agent = SocialAgent(agent_id=i, + user_info=user_info, + channel=channel) + + # Sign up the agent + return_message = await agent.env.action.sign_up( + f"testuser{i}", f"TestUser{i}", f"Test bio {i}.") + assert return_message["success"] is True + agents.append(agent) + + # Conduct interviews with different prompts + interview_prompts = [ + "What is your opinion on climate change?", + "How do you feel about social media?", + "What are your thoughts on remote work?" + ] + + for i, (agent, prompt) in enumerate(zip(agents, interview_prompts)): + return_message = await agent.env.action.interview(prompt) + assert return_message["success"] is True + assert "interview_id" in return_message + + # Verify all interviews were recorded + conn = sqlite3.connect(test_db_filepath) + cursor = conn.cursor() + cursor.execute( + """ + SELECT user_id, info, action + FROM trace + WHERE action = ? + ORDER BY user_id + """, (ActionType.INTERVIEW.value, )) + + interview_records = cursor.fetchall() + assert len(interview_records) == 3 + + for i, (user_id, info_json, action) in enumerate(interview_records): + assert user_id == i + assert action == ActionType.INTERVIEW.value + + info = json.loads(info_json) + assert info["prompt"] == interview_prompts[i] + assert "interview_id" in info + + conn.close() + + finally: + await channel.write_to_receive_queue((None, None, ActionType.EXIT)) + await task + + +@pytest.mark.asyncio +async def test_interview_with_environment(): + """Test interview functionality using the full OASIS environment.""" + # Create a temporary database file + with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as tmp_file: + db_path = tmp_file.name + + try: + openai_model = ModelFactory.create( + model_platform=ModelPlatformType.OPENAI, + model_type=ModelType.GPT_4O_MINI, + ) + + available_actions = [ + ActionType.CREATE_POST, + ActionType.LIKE_POST, + ActionType.REPOST, + ActionType.FOLLOW, + ActionType.DO_NOTHING, + ] + + # Create a minimal agent graph for testing + agent_graph = await generate_twitter_agent_graph( + profile_path=("data/twitter_dataset/anonymous_topic_200_1h/" + "False_Business_0.csv"), + model=openai_model, + available_actions=available_actions, + ) + + # Create environment + env = oasis.make( + agent_graph=agent_graph, + platform=oasis.DefaultPlatformType.TWITTER, + database_path=db_path, + ) + + await env.reset() + + # Test single interview action + actions = {} + actions[env.agent_graph.get_agent(0)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={"prompt": "What is your favorite color and why?"}) + await env.step(actions) + + # Test multiple interviews in one step + actions = {} + actions[env.agent_graph.get_agent(1)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={"prompt": "What do you think about technology?"}) + actions[env.agent_graph.get_agent(2)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={"prompt": "How do you spend your free time?"}) + await env.step(actions) + + # Test mixing interview with other actions + actions = {} + actions[env.agent_graph.get_agent(0)] = ManualAction( + action_type=ActionType.CREATE_POST, + action_args={"content": "Hello 
world!"}) + actions[env.agent_graph.get_agent(3)] = ManualAction( + action_type=ActionType.INTERVIEW, + action_args={"prompt": "What motivates you in life?"}) + await env.step(actions) + + await env.close() + + # Verify interview data in database + conn = sqlite3.connect(db_path) + cursor = conn.cursor() + cursor.execute( + """ + SELECT user_id, info, created_at + FROM trace + WHERE action = ? + ORDER BY created_at + """, (ActionType.INTERVIEW.value, )) + + interview_records = cursor.fetchall() + assert len(interview_records) == 4 # 4 interviews conducted + + # Check interview content + expected_prompts = { + "What is your favorite color and why?", + "What do you think about technology?", + "How do you spend your free time?", "What motivates you in life?" + } + + # Get all records and verify content exists + actual_prompts = set() + for user_id, info_json, timestamp in interview_records: + info = json.loads(info_json) + actual_prompts.add(info["prompt"]) + assert "interview_id" in info + assert "response" in info + + # Use set comparison to verify all expected prompts exist + assert actual_prompts == expected_prompts, ( + f"Missing prompts: {expected_prompts - actual_prompts}, " + f"Unexpected prompts: {actual_prompts - expected_prompts}") + + conn.close() + + finally: + # Cleanup + if os.path.exists(db_path): + os.remove(db_path) + + +@pytest.mark.asyncio +async def test_interview_data_retrieval(setup_interview_test): + """Test retrieving and analyzing interview data from the database.""" + agents = [] + channel = Channel() + infra = Platform(test_db_filepath, channel) + task = asyncio.create_task(infra.running()) + + try: + # Create and sign up test agents + for i in range(2): + real_name = f"TestAgent{i}" + description = f"Test agent {i}." + profile = { + "nodes": [], + "edges": [], + "other_info": { + "user_profile": f"Agent {i} profile", + "mbti": "INTJ", + "activity_level": ["online"] * 24, + "activity_level_frequency": [5] * 24, + "active_threshold": [0.5] * 24, + }, + } + user_info = UserInfo(name=real_name, + description=description, + profile=profile) + agent = SocialAgent(agent_id=i, + user_info=user_info, + channel=channel) + + return_message = await agent.env.action.sign_up( + f"testuser{i}", f"TestUser{i}", f"Bio {i}.") + assert return_message["success"] is True + agents.append(agent) + + # Conduct interviews + interview_data = [ + (0, "What is your favorite programming language?"), + (1, "How do you approach problem-solving?"), + (0, + "What are your career goals?"), # Second interview with agent 0 + ] + + for agent_id, prompt in interview_data: + return_message = await agents[agent_id].env.action.interview(prompt + ) + assert return_message["success"] is True + + # Test data retrieval functions + conn = sqlite3.connect(test_db_filepath) + cursor = conn.cursor() + + # Test 1: Get all interviews + cursor.execute( + """ + SELECT user_id, info, created_at + FROM trace + WHERE action = ? + ORDER BY created_at + """, (ActionType.INTERVIEW.value, )) + + all_interviews = cursor.fetchall() + assert len(all_interviews) == 3 + + # Test 2: Get interviews for specific agent + cursor.execute( + """ + SELECT user_id, info, created_at + FROM trace + WHERE action = ? AND user_id = ? 
+ ORDER BY created_at + """, (ActionType.INTERVIEW.value, 0)) + + agent_0_interviews = cursor.fetchall() + assert len(agent_0_interviews) == 2 + + # Test 3: Verify interview content + for i, (user_id, info_json, timestamp) in enumerate(all_interviews): + info = json.loads(info_json) + expected_agent_id, expected_prompt = interview_data[i] + assert user_id == expected_agent_id + assert info["prompt"] == expected_prompt + assert "interview_id" in info + + conn.close() + + finally: + await channel.write_to_receive_queue((None, None, ActionType.EXIT)) + await task + + +@pytest.mark.asyncio +async def test_interview_error_handling(setup_interview_test): + """Test error handling in interview functionality.""" + agents = [] + channel = Channel() + infra = Platform(test_db_filepath, channel) + task = asyncio.create_task(infra.running()) + + try: + # Create and sign up a test agent + real_name = "TestAgent" + description = "Test agent." + profile = { + "nodes": [], + "edges": [], + "other_info": { + "user_profile": "Test profile", + "mbti": "INTJ", + "activity_level": ["online"] * 24, + "activity_level_frequency": [5] * 24, + "active_threshold": [0.5] * 24, + }, + } + user_info = UserInfo(name=real_name, + description=description, + profile=profile) + agent = SocialAgent(agent_id=0, user_info=user_info, channel=channel) + + return_message = await agent.env.action.sign_up( + "testuser", "TestUser", "Test bio.") + assert return_message["success"] is True + agents.append(agent) + + # Test with empty prompt + return_message = await agent.env.action.interview("") + assert return_message[ + "success"] is True # Empty prompt should still work + + # Test with very long prompt + long_prompt = "What do you think about your state? " * 50 + return_message = await agent.env.action.interview(long_prompt) + assert return_message["success"] is True + + # Verify both interviews were recorded + conn = sqlite3.connect(test_db_filepath) + cursor = conn.cursor() + cursor.execute( + """ + SELECT COUNT(*) + FROM trace + WHERE action = ? AND user_id = ?
+ """, (ActionType.INTERVIEW.value, 0)) + + count = cursor.fetchone()[0] + assert count == 2 + + conn.close() + + finally: + await channel.write_to_receive_queue((None, None, ActionType.EXIT)) + await task + + +if __name__ == "__main__": + # Run tests individually for debugging + async def run_tests(): + print("Running interview action tests...") + + # You can run individual tests here for debugging + # await test_single_interview_action(None) + # await test_multiple_interviews_action(None) + # await test_interview_data_retrieval(None) + # await test_interview_error_handling(None) + + print("All tests completed!") + + # asyncio.run(run_tests()) diff --git a/test/agent/test_multi_agent_signup_create.py b/test/agent/test_multi_agent_signup_create.py index 7cd4edd6..5b6bcc73 100644 --- a/test/agent/test_multi_agent_signup_create.py +++ b/test/agent/test_multi_agent_signup_create.py @@ -65,9 +65,7 @@ async def test_agents_posting(setup_platform): user_info = UserInfo(name=real_name, description=description, profile=profile) - agent = SocialAgent(agent_id=i, - user_info=user_info, - twitter_channel=channel) + agent = SocialAgent(agent_id=i, user_info=user_info, channel=channel) await agent.env.action.sign_up(f"user{i}0101", f"User{i}", "A bio.") agents.append(agent) diff --git a/test/agent/test_twitter_user_agent_all_actions.py b/test/agent/test_twitter_user_agent_all_actions.py index 02644434..f016aa76 100644 --- a/test/agent/test_twitter_user_agent_all_actions.py +++ b/test/agent/test_twitter_user_agent_all_actions.py @@ -60,9 +60,7 @@ async def test_agents_actions(setup_twitter): user_info = UserInfo(name=real_name, description=description, profile=profile) - agent = SocialAgent(agent_id=i, - user_info=user_info, - twitter_channel=channel) + agent = SocialAgent(agent_id=i, user_info=user_info, channel=channel) return_message = await agent.env.action.sign_up( f"user{i}0101", f"User{i}", "A bio.") assert return_message["success"] is True