|
"""
Example of using the Apex DeepResearch API asynchronously with the Macrocosmos SDK.

Demonstrates how a deep-researcher job can be polled at regular intervals
to check its current status and retrieve the latest results generated.
"""
| 6 | + |
| 7 | +import asyncio |
| 8 | +import os |
| 9 | +import json |
| 10 | +from typing import Optional, Any, List |
| 11 | + |
| 12 | +import macrocosmos as mc |
| 13 | + |
| 14 | + |
| 15 | +def extract_content_from_chunk(chunk_str: str) -> Optional[str]: |
| 16 | + """Extract content from a JSON chunk string if available.""" |
| 17 | + try: |
| 18 | + chunk_list = json.loads(chunk_str) |
| 19 | + if chunk_list and len(chunk_list) > 0 and "content" in chunk_list[0]: |
| 20 | + return chunk_list[0]["content"] |
| 21 | + except (json.JSONDecodeError, IndexError, KeyError) as e: |
| 22 | + print(f"Failed to parse chunk: {e}") |
| 23 | + return None |
| 24 | + |
| 25 | + |
async def process_result_chunks(results: List[Any], last_seq_id: int) -> int:
    """Scan *results* for chunks newer than *last_seq_id*.

    Prints the content of every previously-unseen chunk (ones whose
    sequence ID exceeds *last_seq_id*) and returns the highest sequence
    ID observed, or the input value if nothing new was found.
    """
    for entry in results:
        try:
            sequence = int(entry.seq_id)
            if sequence > last_seq_id:
                content = extract_content_from_chunk(entry.chunk)
                if content:
                    print(f"\nseq_id {sequence}:\n{content}")
                last_seq_id = sequence
        except (ValueError, AttributeError) as err:
            # A malformed item (non-numeric seq_id or missing attribute)
            # is reported and skipped; remaining items are still processed.
            print(f"Error processing sequence: {err}")
    return last_seq_id
| 38 | + |
| 39 | + |
async def demo_deep_research_polling():
    """Demo asynchronous deep research job creation and update polling.

    Creates a DeepResearch job via the async Apex client, then polls
    ``get_job_results`` every 20 seconds, printing each batch of newly
    generated chunks until the job reports "completed" or "failed".
    """
    print("\nRunning asynchronous Deep Research example...")

    # Prefer the Apex-specific key; fall back to the general Macrocosmos key.
    # NOTE(review): if neither env var is set this passes None to the client —
    # presumably the SDK raises its own auth error; confirm against SDK docs.
    api_key = os.environ.get("APEX_API_KEY", os.environ.get("MACROCOSMOS_API_KEY"))

    client = mc.AsyncApexClient(
        api_key=api_key, app_name="examples/apex_deep_research_polling.py"
    )

    # Create a deep research job with create_job
    submitted_response = await client.deep_research.create_job(
        messages=[
            {
                "role": "user",
                "content": """Can you propose a mechanism by which a decentralized network
                of AI agents could achieve provable alignment on abstract ethical principles
                without relying on human-defined ontologies or centralized arbitration?""",
            }
        ]
    )

    print("\nCreated deep research job.\n")
    print(f"Initial status: {submitted_response.status}")
    print(f"Job ID: {submitted_response.job_id}")
    print(f"Created at: {submitted_response.created_at}\n")

    # Poll for job status with get_job_results based on the job_id
    print("Polling the results...")
    last_seq_id = -1  # Track the highest sequence ID we've seen
    last_updated = None  # Track the last update time
    while True:
        try:
            polled_response = await client.deep_research.get_job_results(
                submitted_response.job_id
            )
            current_status = polled_response.status
            current_updated = polled_response.updated_at

            # On completion, print the final answer and its sequence ID
            if current_status == "completed":
                print("\nJob completed successfully!")
                print(f"\nLast update at: {current_updated}")
                if polled_response.result:
                    # Only the last chunk is printed here: it is taken to be
                    # the final answer of the research job.
                    if content := extract_content_from_chunk(
                        polled_response.result[-1].chunk
                    ):
                        print(
                            f"\nFinal answer (seq_id {polled_response.result[-1].seq_id}):\n{content}"
                        )
                break

            elif current_status == "failed":
                # `error` may not exist on the response object, hence hasattr.
                print(
                    f"\nJob failed: {polled_response.error if hasattr(polled_response, 'error') else 'Unknown error'}"
                )
                print(f"\nLast update at: {current_updated}")
                break

            # Check if we have new content by comparing update times
            if current_updated != last_updated:
                print(f"\nNew update at {current_updated}")
                print(f"Status: {current_status}")

                # Process new content
                if polled_response.result:
                    # last_seq_id advances so already-printed chunks are skipped
                    # on the next poll.
                    last_seq_id = await process_result_chunks(
                        polled_response.result, last_seq_id
                    )
                else:
                    print(
                        "No results available yet. Waiting for Deep Researcher to generate data..."
                    )
                last_updated = current_updated

        except Exception as e:
            # Transient polling failures are reported and retried on the next
            # iteration; the loop only exits on "completed" or "failed".
            print(f"Error during polling: {e}")

        await asyncio.sleep(20)  # Poll in 20 second intervals
| 119 | + |
| 120 | + |
if __name__ == "__main__":
    # Script entry point: asyncio.run creates the event loop, runs the demo
    # to completion, and closes the loop.
    asyncio.run(demo_deep_research_polling())