|
| 1 | +import os |
| 2 | +import uuid |
| 3 | + |
| 4 | +from openai import OpenAI |
| 5 | + |
| 6 | +import sentry_sdk |
| 7 | +from sentry_sdk.ai.monitoring import ai_track |
| 8 | +from sentry_sdk.ai.utils import set_conversation_id |
| 9 | +from sentry_sdk.integrations.openai import OpenAIIntegration |
| 10 | +from sentry_sdk.integrations.stdlib import StdlibIntegration |
| 11 | + |
| 12 | + |
@ai_track("Multi-turn conversation workflow")
def conversation_workflow(client):
    """Run a three-turn travel-planning chat against a single OpenAI session.

    Every turn appends to one shared message history and all AI spans are
    tagged with the same Sentry conversation ID so they group together in
    AI monitoring. *client* must expose the OpenAI ``chat.completions`` API.
    """
    # One conversation ID ties every AI span in this session together.
    conversation_id = str(uuid.uuid4())
    print(f"Conversation ID: {conversation_id}")

    # Applies to all subsequent AI calls on the current scope.
    set_conversation_id(conversation_id)

    # The three user prompts, in order; previously this was three
    # copy-pasted request/print stanzas.
    questions = [
        "Hi! I'm planning a trip to Japan. What's the best time to visit?",
        "What about cherry blossom season? When does that happen?",
        "Can you recommend some must-see places in Tokyo?",
    ]

    messages = []

    with sentry_sdk.start_transaction(name="openai-conversation"):
        for turn, question in enumerate(questions, start=1):
            # Carry the full history forward so each turn has context.
            messages.append({"role": "user", "content": question})

            response = client.chat.completions.create(
                messages=messages,
                model="gpt-4o-mini",
                max_tokens=200,
                temperature=0.7,
            )
            assistant_message = response.choices[0].message.content
            messages.append({"role": "assistant", "content": assistant_message})
            print("--------------------------------")
            print(f"Turn {turn} - User: {question}")
            print(f"Turn {turn} - Assistant: {assistant_message}")
| 69 | + |
| 70 | + |
@ai_track("Interleaved conversations workflow")
def interleaved_conversations_workflow(client):
    """Two conversations happening simultaneously, interleaved.

    Demonstrates switching the Sentry conversation ID back and forth so
    spans from two independent chat sessions stay attributed to the right
    conversation. *client* must expose the OpenAI ``chat.completions`` API.
    """
    # Generate unique conversation IDs for both sessions.
    conversation_id_a = str(uuid.uuid4())
    conversation_id_b = str(uuid.uuid4())
    print(f"Conversation A ID: {conversation_id_a}")
    print(f"Conversation B ID: {conversation_id_b}")

    messages_a = []
    messages_b = []

    # (label, conversation ID, shared history, user prompt) in the exact
    # interleaved A/B/A/B order; previously four copy-pasted stanzas.
    turns = [
        ("Conv A - Turn 1", conversation_id_a, messages_a,
         "I want to learn Python. Where should I start?"),
        ("Conv B - Turn 1", conversation_id_b, messages_b,
         "What's a good recipe for pasta?"),
        ("Conv A - Turn 2", conversation_id_a, messages_a,
         "What about web development with Python?"),
        ("Conv B - Turn 2", conversation_id_b, messages_b,
         "Can you make it vegetarian?"),
    ]

    with sentry_sdk.start_transaction(name="openai-interleaved-conversations"):
        for label, conversation_id, messages, question in turns:
            # Re-point subsequent AI spans at the right conversation before
            # each call — this is the behavior under demonstration.
            set_conversation_id(conversation_id)
            messages.append({"role": "user", "content": question})

            response = client.chat.completions.create(
                messages=messages,
                model="gpt-4o-mini",
                max_tokens=100,
                temperature=0.7,
            )
            assistant_message = response.choices[0].message.content
            messages.append({"role": "assistant", "content": assistant_message})
            print("--------------------------------")
            print(f"[{label}] User: {question}")
            print(f"[{label}] Assistant: {assistant_message}")
| 147 | + |
| 148 | + |
def before_send_transaction(event, hint):
    """Debug hook: print each span's gen_ai.* attributes, then forward the event.

    Registered via ``sentry_sdk.init(before_send_transaction=...)``.  Returning
    the event unchanged means the transaction is still sent.

    :param event: transaction event dict (may contain a ``"spans"`` list).
    :param hint: extra context from the SDK; unused here.
    :returns: the event, unmodified.
    """
    print("\n" + "=" * 80)
    print("TRANSACTION EVENT - Printing all spans with gen_ai attributes")
    print("=" * 80)

    # ``spans`` / ``data`` can be present but explicitly None, in which case
    # dict.get(key, default) returns that None — guard with ``or`` so we
    # never iterate over None.
    for span in event.get("spans") or []:
        op = span.get("op", "unknown")
        description = span.get("description", "no description")
        span_id = span.get("span_id", "unknown")
        data = span.get("data") or {}

        # Keep only the AI-monitoring attributes.
        gen_ai_attrs = {k: v for k, v in data.items() if k.startswith("gen_ai")}

        if gen_ai_attrs:
            print(f"\nSPAN: {op} - {description}")
            print(f"Span ID: {span_id}")
            print("gen_ai attributes:")
            for key, value in sorted(gen_ai_attrs.items()):
                print(f"  {key}: {value}")

    print("=" * 80 + "\n")
    return event
| 174 | + |
| 175 | + |
def main():
    """Initialize the Sentry SDK and OpenAI client, then run the demo workflow."""
    sentry_sdk.init(
        # os.getenv already defaults to None when the variable is unset,
        # so no explicit default is needed.
        dsn=os.getenv("SENTRY_DSN"),
        environment=os.getenv("ENV", "openai-test-conversation"),
        # Sample everything — this is a test/demo script.
        traces_sample_rate=1.0,
        profiles_sample_rate=1.0,
        # PII must be enabled for prompt/response capture alongside
        # include_prompts below.
        send_default_pii=True,
        debug=True,
        # Dumps gen_ai span attributes to stdout before each send.
        before_send_transaction=before_send_transaction,
        integrations=[
            OpenAIIntegration(
                include_prompts=True,
                tiktoken_encoding_name="cl100k_base",
            ),
        ],
        # NOTE(review): presumably disabled to suppress stdlib HTTP spans
        # around the OpenAI API calls — confirm.
        disabled_integrations=[
            StdlibIntegration(),
        ],
    )

    client = OpenAI(
        api_key=os.environ.get("OPENAI_API_KEY"),
    )

    interleaved_conversations_workflow(client)

    print("--------------------------------")
    print("Done!")


if __name__ == "__main__":
    main()
0 commit comments