Skip to content

Commit 8357aa2

Browse files
committed
test(OpenAI): testing explicit conversation ID
1 parent aa1730a commit 8357aa2

4 files changed

Lines changed: 403 additions & 0 deletions

File tree

test-openai/main_conversation.py

Lines changed: 207 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,207 @@
1+
import os
2+
import uuid
3+
4+
from openai import OpenAI
5+
6+
import sentry_sdk
7+
from sentry_sdk.ai.monitoring import ai_track
8+
from sentry_sdk.ai.utils import set_conversation_id
9+
from sentry_sdk.integrations.openai import OpenAIIntegration
10+
from sentry_sdk.integrations.stdlib import StdlibIntegration
11+
12+
13+
@ai_track("Multi-turn conversation workflow")
def conversation_workflow(client):
    """Run a single three-turn chat conversation tagged with one conversation ID.

    Generates a fresh UUID, registers it via ``set_conversation_id`` so every
    subsequent AI span is attributed to the same conversation, then performs
    three chat-completion turns inside one Sentry transaction, carrying the
    full message history forward on each call.

    :param client: a synchronous ``openai.OpenAI`` client.
    """
    # One ID for the whole session; every AI call below shares it.
    conversation_id = str(uuid.uuid4())
    print(f"Conversation ID: {conversation_id}")

    # Set the conversation ID for all subsequent AI calls.
    set_conversation_id(conversation_id)

    # The three user prompts, asked in order; the assistant replies are
    # appended to the shared history so later turns have full context.
    questions = [
        "Hi! I'm planning a trip to Japan. What's the best time to visit?",
        "What about cherry blossom season? When does that happen?",
        "Can you recommend some must-see places in Tokyo?",
    ]
    messages = []

    with sentry_sdk.start_transaction(name="openai-conversation"):
        for turn, question in enumerate(questions, start=1):
            messages.append({"role": "user", "content": question})

            response = client.chat.completions.create(
                messages=messages,
                model="gpt-4o-mini",
                max_tokens=200,
                temperature=0.7,
            )
            assistant_message = response.choices[0].message.content
            messages.append({"role": "assistant", "content": assistant_message})

            print("--------------------------------")
            print(f"Turn {turn} - User: {question}")
            print(f"Turn {turn} - Assistant: {assistant_message}")
70+
71+
@ai_track("Interleaved conversations workflow")
def interleaved_conversations_workflow(client):
    """Two conversations happening simultaneously, interleaved.

    Runs turns in the order A1, B1, A2, B2, switching the active conversation
    ID with ``set_conversation_id`` before each call so every AI span is
    attributed to the correct conversation even though the calls interleave.

    :param client: a synchronous ``openai.OpenAI`` client.
    """
    # Generate unique conversation IDs for both sessions.
    conversation_id_a = str(uuid.uuid4())
    conversation_id_b = str(uuid.uuid4())
    print(f"Conversation A ID: {conversation_id_a}")
    print(f"Conversation B ID: {conversation_id_b}")

    messages_a = []
    messages_b = []

    # (conversation id, shared history, print label, user prompt) — listed in
    # the exact interleaved execution order A1, B1, A2, B2.
    turns = [
        (conversation_id_a, messages_a, "Conv A - Turn 1",
         "I want to learn Python. Where should I start?"),
        (conversation_id_b, messages_b, "Conv B - Turn 1",
         "What's a good recipe for pasta?"),
        (conversation_id_a, messages_a, "Conv A - Turn 2",
         "What about web development with Python?"),
        (conversation_id_b, messages_b, "Conv B - Turn 2",
         "Can you make it vegetarian?"),
    ]

    with sentry_sdk.start_transaction(name="openai-interleaved-conversations"):
        for conv_id, messages, label, question in turns:
            # Switch the active conversation before each call so the span
            # created for this completion carries the right ID.
            set_conversation_id(conv_id)
            messages.append({"role": "user", "content": question})

            response = client.chat.completions.create(
                messages=messages,
                model="gpt-4o-mini",
                max_tokens=100,
                temperature=0.7,
            )
            assistant_message = response.choices[0].message.content
            messages.append({"role": "assistant", "content": assistant_message})

            print("--------------------------------")
            print(f"[{label}] User: {question}")
            print(f"[{label}] Assistant: {assistant_message}")
148+
149+
def before_send_transaction(event, hint):
    """Print gen_ai attributes from all spans before sending transaction.

    Acts as a Sentry ``before_send_transaction`` hook: dumps every span whose
    ``data`` contains ``gen_ai``-prefixed attributes, then forwards the event
    unchanged.
    """
    banner = "=" * 80
    print("\n" + banner)
    print("TRANSACTION EVENT - Printing all spans with gen_ai attributes")
    print(banner)

    for span in event.get("spans", []):
        span_data = span.get("data", {})
        gen_ai_attrs = {
            key: value
            for key, value in span_data.items()
            if key.startswith("gen_ai")
        }
        if not gen_ai_attrs:
            continue  # skip spans without AI telemetry

        print(f"\nSPAN: {span.get('op', 'unknown')} - "
              f"{span.get('description', 'no description')}")
        print(f"Span ID: {span.get('span_id', 'unknown')}")
        print("gen_ai attributes:")
        for key in sorted(gen_ai_attrs):
            print(f"  {key}: {gen_ai_attrs[key]}")

    print(banner + "\n")
    return event
174+
175+
176+
def main():
    """Initialize the Sentry SDK, then run the interleaved-conversation demo once."""
    sentry_sdk.init(
        dsn=os.getenv("SENTRY_DSN", None),
        environment=os.getenv("ENV", "openai-test-conversation"),
        traces_sample_rate=1.0,
        profiles_sample_rate=1.0,
        send_default_pii=True,
        debug=True,
        before_send_transaction=before_send_transaction,
        integrations=[
            OpenAIIntegration(
                include_prompts=True,
                tiktoken_encoding_name="cl100k_base",
            ),
        ],
        # Stdlib integration disabled to keep HTTP-client spans out of the trace.
        disabled_integrations=[StdlibIntegration()],
    )

    openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

    # NOTE(review): conversation_workflow is defined in this module but never
    # called here — only the interleaved workflow runs; confirm intentional.
    interleaved_conversations_workflow(openai_client)

    print("--------------------------------")
    print("Done!")


if __name__ == "__main__":
    main()
Lines changed: 166 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,166 @@
1+
import asyncio
2+
import os
3+
import uuid
4+
5+
from openai import AsyncOpenAI
6+
7+
import sentry_sdk
8+
from sentry_sdk.ai.monitoring import ai_track
9+
from sentry_sdk.ai.utils import set_conversation_id
10+
from sentry_sdk.integrations.asyncio import AsyncioIntegration
11+
from sentry_sdk.integrations.openai import OpenAIIntegration
12+
from sentry_sdk.integrations.stdlib import StdlibIntegration
13+
14+
15+
async def conversation_a(client, conversation_id: str):
    """First conversation about learning Python.

    Performs two chat-completion turns, re-registering *conversation_id*
    before every call because a concurrently running coroutine may have
    switched the active conversation ID while this one was awaiting.

    :param client: an ``openai.AsyncOpenAI`` client.
    :param conversation_id: ID attached to every AI span in this conversation.
    :return: the literal string ``"Conversation A complete"``.
    """
    messages = []
    questions = [
        "I want to learn Python. Where should I start?",
        "What about web development with Python?",
    ]

    for turn, question in enumerate(questions, start=1):
        # Re-set before each call: the await points below are context-switch
        # opportunities where the sibling conversation changes the active ID.
        set_conversation_id(conversation_id)
        messages.append({"role": "user", "content": question})

        response = await client.chat.completions.create(
            messages=messages,
            model="gpt-4o-mini",
            max_tokens=100,
            temperature=0.7,
        )
        assistant_message = response.choices[0].message.content
        messages.append({"role": "assistant", "content": assistant_message})

        print("--------------------------------")
        print(f"[Conv A - Turn {turn}] User: {question}")
        print(f"[Conv A - Turn {turn}] Assistant: {assistant_message}")

    return "Conversation A complete"
50+
51+
52+
async def conversation_b(client, conversation_id: str):
    """Second conversation about cooking.

    Mirrors ``conversation_a``: two chat-completion turns, re-registering
    *conversation_id* before every call to survive context switches between
    concurrently running conversations.

    :param client: an ``openai.AsyncOpenAI`` client.
    :param conversation_id: ID attached to every AI span in this conversation.
    :return: the literal string ``"Conversation B complete"``.
    """
    messages = []
    questions = [
        "What's a good recipe for pasta?",
        "Can you make it vegetarian?",
    ]

    for turn, question in enumerate(questions, start=1):
        # Re-set before each call in case another coroutine switched the
        # active conversation ID during an await.
        set_conversation_id(conversation_id)
        messages.append({"role": "user", "content": question})

        response = await client.chat.completions.create(
            messages=messages,
            model="gpt-4o-mini",
            max_tokens=100,
            temperature=0.7,
        )
        assistant_message = response.choices[0].message.content
        messages.append({"role": "assistant", "content": assistant_message})

        print("--------------------------------")
        print(f"[Conv B - Turn {turn}] User: {question}")
        print(f"[Conv B - Turn {turn}] Assistant: {assistant_message}")

    return "Conversation B complete"
87+
88+
89+
@ai_track("Concurrent conversations workflow")
async def concurrent_conversations_workflow(client):
    """Two conversations running concurrently with asyncio.gather().

    Assigns each conversation its own UUID and awaits both inside a single
    Sentry transaction.
    """
    id_a = str(uuid.uuid4())
    id_b = str(uuid.uuid4())
    print(f"Conversation A ID: {id_a}")
    print(f"Conversation B ID: {id_b}")

    with sentry_sdk.start_transaction(name="openai-concurrent-conversations"):
        # Both coroutines run interleaved on the event loop; gather preserves
        # result order (A first, B second).
        results = await asyncio.gather(
            conversation_a(client, id_a),
            conversation_b(client, id_b),
        )
        print("--------------------------------")
        print(f"Results: {results}")
106+
107+
108+
def before_send_transaction(event, hint):
    """Print gen_ai attributes from all spans before sending transaction.

    Sentry ``before_send_transaction`` hook: logs every span carrying
    ``gen_ai``-prefixed data, then returns the event untouched so it is
    still sent.
    """
    separator = "=" * 80
    print("\n" + separator)
    print("TRANSACTION EVENT - Printing all spans with gen_ai attributes")
    print(separator)

    for span in event.get("spans", []):
        data = span.get("data", {})
        gen_ai_attrs = {k: v for k, v in data.items() if k.startswith("gen_ai")}
        if not gen_ai_attrs:
            continue  # only spans with AI telemetry are interesting here

        op = span.get("op", "unknown")
        description = span.get("description", "no description")
        print(f"\nSPAN: {op} - {description}")
        print(f"Span ID: {span.get('span_id', 'unknown')}")
        print("gen_ai attributes:")
        for key, value in sorted(gen_ai_attrs.items()):
            print(f"  {key}: {value}")

    print(separator + "\n")
    return event
133+
134+
135+
async def main():
    """Initialize Sentry with asyncio support, then run two conversations concurrently."""
    sentry_sdk.init(
        dsn=os.getenv("SENTRY_DSN", None),
        environment=os.getenv("ENV", "openai-test-conversation-async"),
        traces_sample_rate=1.0,
        profiles_sample_rate=1.0,
        send_default_pii=True,
        debug=True,
        before_send_transaction=before_send_transaction,
        integrations=[
            # AsyncioIntegration so spans created inside gathered coroutines
            # attach to the surrounding trace.
            AsyncioIntegration(),
            OpenAIIntegration(
                include_prompts=True,
                tiktoken_encoding_name="cl100k_base",
            ),
        ],
        disabled_integrations=[
            StdlibIntegration(),
        ],
    )

    client = AsyncOpenAI(
        api_key=os.environ.get("OPENAI_API_KEY"),
    )

    await concurrent_conversations_workflow(client)

    print("--------------------------------")
    print("Done!")


# Guard the entry point so importing this module does not fire the workflow
# (the original ran asyncio.run(main()) unconditionally at import time).
if __name__ == "__main__":
    asyncio.run(main())

test-openai/run_conversation.sh

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
#!/usr/bin/env bash

# Fail fast: exit on first error (-e), on unset variables (-u), and on any
# failure inside a pipeline (pipefail).
set -euo pipefail

# Auto-export every variable assigned while sourcing .env (set -a), then
# restore normal assignment behavior. NOTE(review): with set -e this aborts
# if .env is missing — confirm that is the intended behavior.
set -a && source .env && set +a

# Install uv if it's not installed
if ! command -v uv &> /dev/null; then
    curl -LsSf https://astral.sh/uv/install.sh | sh
fi

# Run the script via uv. SENTRY_SPOTLIGHT=1 presumably enables Sentry's local
# Spotlight debugging output — verify against the SDK's Spotlight docs.
export SENTRY_SPOTLIGHT=1
uv run python main_conversation.py

0 commit comments

Comments
 (0)