17
17
from unittest import mock # python 3.3 and above
18
18
19
19
20
# Canned, deterministic ChatCompletion payload shared by the chat-completion
# tests so each test does not have to rebuild the same fixture inline.
_EXAMPLE_MESSAGE = ChatCompletionMessage(
    role="assistant", content="the model response"
)

EXAMPLE_CHAT_COMPLETION = ChatCompletion(
    id="chat-id",
    # A single "stop"-terminated choice carrying the canned assistant message.
    choices=[
        Choice(
            index=0,
            finish_reason="stop",
            message=_EXAMPLE_MESSAGE,
        )
    ],
    created=10000000,
    model="model-id",
    object="chat.completion",
    # Token counts are asserted by the tests (10 + 20 == 30).
    usage=CompletionUsage(
        completion_tokens=10,
        prompt_tokens=20,
        total_tokens=30,
    ),
)
40
+
41
+
20
42
def test_nonstreaming_chat_completion (sentry_init , capture_events ):
21
43
sentry_init (
22
- integrations = [OpenAIIntegration (include_prompts = True )], traces_sample_rate = 1.0
44
+ integrations = [OpenAIIntegration ()],
45
+ traces_sample_rate = 1.0 ,
46
+ send_default_pii = True ,
23
47
)
24
48
events = capture_events ()
25
49
26
50
client = OpenAI (api_key = "z" )
27
- returned_chat = ChatCompletion (
28
- id = "chat-id" ,
29
- choices = [
30
- Choice (
31
- index = 0 ,
32
- finish_reason = "stop" ,
33
- message = ChatCompletionMessage (role = "assistant" , content = "response" ),
34
- )
35
- ],
36
- created = 10000000 ,
37
- model = "model-id" ,
38
- object = "chat.completion" ,
39
- usage = CompletionUsage (
40
- completion_tokens = 10 ,
41
- prompt_tokens = 20 ,
42
- total_tokens = 30 ,
43
- ),
44
- )
51
+ client .chat .completions ._post = mock .Mock (return_value = EXAMPLE_CHAT_COMPLETION )
45
52
46
- client .chat .completions ._post = mock .Mock (return_value = returned_chat )
47
53
with start_transaction (name = "openai tx" ):
48
54
response = (
49
55
client .chat .completions .create (
@@ -53,17 +59,63 @@ def test_nonstreaming_chat_completion(sentry_init, capture_events):
53
59
.message .content
54
60
)
55
61
56
- assert response == "response"
62
+ assert response == "the model response"
57
63
tx = events [0 ]
58
64
assert tx ["type" ] == "transaction"
59
65
span = tx ["spans" ][0 ]
60
- assert span ["op" ] == "openai.chat_completions.create"
66
+ assert span ["op" ] == "ai.chat_completions.create.openai"
67
+ assert "the model response" in span ["data" ]["ai.responses" ][0 ]
61
68
62
69
assert span ["data" ][COMPLETION_TOKENS_USED ] == 10
63
70
assert span ["data" ][PROMPT_TOKENS_USED ] == 20
64
71
assert span ["data" ][TOTAL_TOKENS_USED ] == 30
65
72
66
73
74
def test_stripped_pii_without_send_default_pii(sentry_init, capture_events):
    """Without send_default_pii, prompts and responses must not reach the span data."""
    sentry_init(
        integrations=[OpenAIIntegration()],
        traces_sample_rate=1.0,
    )
    captured = capture_events()

    client = OpenAI(api_key="z")
    # Stub the low-level transport so no real network call is made.
    client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION)

    with start_transaction(name="openai tx"):
        client.chat.completions.create(
            model="some-model", messages=[{"role": "system", "content": "hello"}]
        )

    (transaction_event,) = captured[:1]
    assert transaction_event["type"] == "transaction"

    first_span = transaction_event["spans"][0]
    span_data = first_span["data"]
    # PII must be stripped: neither prompts nor responses are recorded.
    assert "ai.input_messages" not in span_data
    assert "ai.responses" not in span_data
94
+
95
+
96
def test_stripped_pii_without_send_prompts(sentry_init, capture_events):
    """Even with send_default_pii on, include_prompts=False must strip prompt data."""
    sentry_init(
        integrations=[OpenAIIntegration(include_prompts=False)],
        traces_sample_rate=1.0,
        send_default_pii=True,
    )
    captured = capture_events()

    client = OpenAI(api_key="z")
    # Stub the low-level transport so no real network call is made.
    client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION)

    with start_transaction(name="openai tx"):
        client.chat.completions.create(
            model="some-model", messages=[{"role": "system", "content": "hello"}]
        )

    (transaction_event,) = captured[:1]
    assert transaction_event["type"] == "transaction"

    first_span = transaction_event["spans"][0]
    span_data = first_span["data"]
    # include_prompts=False overrides PII consent for prompt/response content.
    assert "ai.input_messages" not in span_data
    assert "ai.responses" not in span_data
117
+
118
+
67
119
# noinspection PyTypeChecker
68
120
def test_streaming_chat_completion (sentry_init , capture_events ):
69
121
sentry_init (
@@ -121,7 +173,7 @@ def test_streaming_chat_completion(sentry_init, capture_events):
121
173
tx = events [0 ]
122
174
assert tx ["type" ] == "transaction"
123
175
span = tx ["spans" ][0 ]
124
- assert span ["op" ] == "openai .chat_completions.create"
176
+ assert span ["op" ] == "ai .chat_completions.create.openai "
125
177
126
178
try :
127
179
import tiktoken # type: ignore # noqa # pylint: disable=unused-import
@@ -134,9 +186,7 @@ def test_streaming_chat_completion(sentry_init, capture_events):
134
186
135
187
136
188
def test_bad_chat_completion (sentry_init , capture_events ):
137
- sentry_init (
138
- integrations = [OpenAIIntegration (include_prompts = True )], traces_sample_rate = 1.0
139
- )
189
+ sentry_init (integrations = [OpenAIIntegration ()], traces_sample_rate = 1.0 )
140
190
events = capture_events ()
141
191
142
192
client = OpenAI (api_key = "z" )
@@ -153,9 +203,7 @@ def test_bad_chat_completion(sentry_init, capture_events):
153
203
154
204
155
205
def test_embeddings_create (sentry_init , capture_events ):
156
- sentry_init (
157
- integrations = [OpenAIIntegration (include_prompts = True )], traces_sample_rate = 1.0
158
- )
206
+ sentry_init (integrations = [OpenAIIntegration ()], traces_sample_rate = 1.0 )
159
207
events = capture_events ()
160
208
161
209
client = OpenAI (api_key = "z" )
@@ -181,7 +229,7 @@ def test_embeddings_create(sentry_init, capture_events):
181
229
tx = events [0 ]
182
230
assert tx ["type" ] == "transaction"
183
231
span = tx ["spans" ][0 ]
184
- assert span ["op" ] == "openai .embeddings.create"
232
+ assert span ["op" ] == "ai .embeddings.create.openai "
185
233
186
234
assert span ["data" ][PROMPT_TOKENS_USED ] == 20
187
235
assert span ["data" ][TOTAL_TOKENS_USED ] == 30
0 commit comments