@@ -20,7 +20,19 @@ import 'jasmine';
 
 import {ChatSession, GenerativeModel, StartChatParams, VertexAI} from './index';
 import * as StreamFunctions from './process_stream';
-import {CountTokensRequest, FinishReason, GenerateContentRequest, GenerateContentResponse, GenerateContentResult, HarmBlockThreshold, HarmCategory, StreamGenerateContentResult,} from './types/content';
+import {
+  CountTokensRequest,
+  FinishReason,
+  GenerateContentRequest,
+  GenerateContentResponse,
+  GenerateContentResult,
+  HarmBlockThreshold,
+  HarmCategory,
+  HarmProbability,
+  SafetyRating,
+  SafetySetting,
+  StreamGenerateContentResult,
+} from './types/content';
 import {constants} from './util';
 
 const PROJECT = 'test_project';
@@ -38,8 +50,8 @@ const TEST_USER_CHAT_MESSAGE_WITH_GCS_FILE = [
       {
         file_data: {
           file_uri: 'gs://test_bucket/test_image.jpeg',
-          mime_type: 'image/jpeg'
-        }
+          mime_type: 'image/jpeg',
+        },
       },
     ],
   },
@@ -55,10 +67,17 @@ const TEST_USER_CHAT_MESSAGE_WITH_INVALID_GCS_FILE = [
   },
 ];
 
-const TEST_SAFETY_RATINGS = [
+const TEST_SAFETY_SETTINGS: SafetySetting[] = [
   {
     category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
-    threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+    threshold: HarmBlockThreshold.BLOCK_ONLY_HIGH,
+  },
+];
+
+const TEST_SAFETY_RATINGS: SafetyRating[] = [
+  {
+    category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
+    probability: HarmProbability.NEGLIGIBLE,
   },
 ];
 const TEST_GENERATION_CONFIG = {
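
A note on the split above: SafetySetting (category plus threshold) is the request-side knob a caller sends, while SafetyRating (category plus probability) is the response-side verdict the model returns, so the old suite was silently reusing one constant for both roles. A minimal sketch of how the two shapes differ, using hypothetical stand-ins for the enums this diff imports from './types/content':

// Hypothetical stand-ins for the imported enums; member names follow the diff.
enum HarmCategory {HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH'}
enum HarmBlockThreshold {BLOCK_ONLY_HIGH = 'BLOCK_ONLY_HIGH'}
enum HarmProbability {NEGLIGIBLE = 'NEGLIGIBLE'}

// Request side: what the caller asks the service to enforce.
interface SafetySetting {
  category: HarmCategory;
  threshold: HarmBlockThreshold;
}

// Response side: what the service reports about generated content.
interface SafetyRating {
  category: HarmCategory;
  probability: HarmProbability;
}

// With the annotations, the compiler rejects a rating used where a setting belongs.
const setting: SafetySetting = {
  category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
  threshold: HarmBlockThreshold.BLOCK_ONLY_HIGH,
};
const rating: SafetyRating = {
  category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
  probability: HarmProbability.NEGLIGIBLE,
};
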
@@ -76,13 +95,14 @@ const TEST_CANDIDATES = [
     finishMessage: '',
     safetyRatings: TEST_SAFETY_RATINGS,
     citationMetadata: {
-      citationSources: [{
-        startIndex: 367,
-        endIndex: 491,
-        uri:
-            'https://www.numerade.com/ask/question/why-does-the-uncertainty-principle-make-it-impossible-to-predict-a-trajectory-for-the-clectron-95172/'
-      }]
-    }
+      citationSources: [
+        {
+          startIndex: 367,
+          endIndex: 491,
+          uri: 'https://www.numerade.com/ask/question/why-does-the-uncertainty-principle-make-it-impossible-to-predict-a-trajectory-for-the-clectron-95172/',
+        },
+      ],
+    },
   },
 ];
 const TEST_MODEL_RESPONSE = {
@@ -178,8 +198,9 @@ describe('VertexAI', () => {
         response: Promise.resolve(TEST_MODEL_RESPONSE),
         stream: testGenerator(),
       };
-      spyOn(StreamFunctions, 'processStream')
-          .and.returnValue(expectedStreamResult);
+      spyOn(StreamFunctions, 'processStream').and.returnValue(
+        expectedStreamResult
+      );
       const resp = await model.generateContent(req);
       expect(resp).toEqual(expectedResult);
     });
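
The spy rewrapped above is the stubbing pattern the whole suite leans on: Jasmine's spyOn replaces a named export on a module namespace object for the duration of the spec, and .and.returnValue pins its result, so generateContent never opens a real stream. A reduced sketch of the same pattern against a hypothetical helpers module (the module and its fetchData function are illustrative, not part of this SDK):

import 'jasmine';
import * as helpers from './helpers'; // hypothetical module standing in for './process_stream'

describe('stubbing a module function', () => {
  it('returns the canned value instead of calling through', () => {
    // Replace helpers.fetchData with a spy that always yields a fixed object.
    spyOn(helpers, 'fetchData').and.returnValue({status: 'ok'});
    expect(helpers.fetchData()).toEqual({status: 'ok'});
  });
});
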
@@ -190,15 +211,17 @@ describe('VertexAI', () => {
       const req: GenerateContentRequest = {
         contents: TEST_USER_CHAT_MESSAGE_WITH_INVALID_GCS_FILE,
       };
-      await expectAsync(model.generateContent(req)).toBeRejectedWithError(URIError);
+      await expectAsync(model.generateContent(req)).toBeRejectedWithError(
+        URIError
+      );
     });
   });
 
   describe('generateContent', () => {
     it('returns a GenerateContentResponse when passed safety_settings and generation_config', async () => {
       const req: GenerateContentRequest = {
         contents: TEST_USER_CHAT_MESSAGE,
-        safety_settings: TEST_SAFETY_RATINGS,
+        safety_settings: TEST_SAFETY_SETTINGS,
         generation_config: TEST_GENERATION_CONFIG,
       };
       const expectedResult: GenerateContentResult = {
@@ -223,7 +246,8 @@ describe('VertexAI', () => {
         location: LOCATION,
         apiEndpoint: TEST_ENDPOINT_BASE_PATH,
       });
-      vertexaiWithBasePath.preview['tokenInternalPromise'] = Promise.resolve(TEST_TOKEN);
+      vertexaiWithBasePath.preview['tokenInternalPromise'] =
+        Promise.resolve(TEST_TOKEN);
       model = vertexaiWithBasePath.preview.getGenerativeModel({
         model: 'gemini-pro',
       });
@@ -255,7 +279,8 @@ describe('VertexAI', () => {
         project: PROJECT,
         location: LOCATION,
       });
-      vertexaiWithoutBasePath.preview['tokenInternalPromise'] = Promise.resolve(TEST_TOKEN);
+      vertexaiWithoutBasePath.preview['tokenInternalPromise'] =
+        Promise.resolve(TEST_TOKEN);
       model = vertexaiWithoutBasePath.preview.getGenerativeModel({
         model: 'gemini-pro',
       });
@@ -275,8 +300,9 @@ describe('VertexAI', () => {
         expectedStreamResult
       );
       await model.generateContent(req);
-      expect(requestSpy.calls.allArgs()[0][0].toString())
-          .toContain(`${LOCATION}-aiplatform.googleapis.com`);
+      expect(requestSpy.calls.allArgs()[0][0].toString()).toContain(
+        `${LOCATION}-aiplatform.googleapis.com`
+      );
     });
   });
 
@@ -295,11 +321,12 @@ describe('VertexAI', () => {
         stream: testGenerator(),
       };
       const requestSpy = spyOn(global, 'fetch');
-      spyOn(StreamFunctions, 'processStream')
-          .and.returnValue(expectedStreamResult);
+      spyOn(StreamFunctions, 'processStream').and.returnValue(
+        expectedStreamResult
+      );
       await model.generateContent(reqWithEmptyConfigs);
       const requestArgs = requestSpy.calls.allArgs()[0][1];
-      if (typeof requestArgs == 'object' && requestArgs) {
+      if (typeof requestArgs === 'object' && requestArgs) {
         expect(JSON.stringify(requestArgs['body'])).not.toContain('top_k');
       }
     });
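
Alongside the rewrap, the == to === change above tightens a guard worth spelling out: fetch's second argument is optional, so requestSpy.calls.allArgs()[0][1] may be undefined, and typeof null is also 'object', which is why the truthiness check rides along before the body is stringified. A small sketch of the guard in isolation, with a hypothetical bodyOf helper:

// requestArgs models the optional second argument captured from the fetch spy.
function bodyOf(requestArgs: unknown): string | undefined {
  // typeof null === 'object', so the truthiness check screens null out too.
  if (typeof requestArgs === 'object' && requestArgs) {
    return JSON.stringify((requestArgs as {body?: unknown}).body);
  }
  return undefined;
}

console.log(bodyOf({body: {top_k: 1}})); // '{"top_k":1}'
console.log(bodyOf(undefined));          // undefined
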
@@ -320,11 +347,12 @@ describe('VertexAI', () => {
         stream: testGenerator(),
       };
       const requestSpy = spyOn(global, 'fetch');
-      spyOn(StreamFunctions, 'processStream')
-          .and.returnValue(expectedStreamResult);
+      spyOn(StreamFunctions, 'processStream').and.returnValue(
+        expectedStreamResult
+      );
       await model.generateContent(reqWithEmptyConfigs);
       const requestArgs = requestSpy.calls.allArgs()[0][1];
-      if (typeof requestArgs == 'object' && requestArgs) {
+      if (typeof requestArgs === 'object' && requestArgs) {
         expect(JSON.stringify(requestArgs['body'])).toContain('top_k');
       }
     });
@@ -342,14 +370,17 @@ describe('VertexAI', () => {
         response: Promise.resolve(TEST_MODEL_RESPONSE),
         stream: testGenerator(),
       };
-      spyOn(StreamFunctions, 'processStream')
-          .and.returnValue(expectedStreamResult);
+      spyOn(StreamFunctions, 'processStream').and.returnValue(
+        expectedStreamResult
+      );
       const resp = await model.generateContent(req);
       console.log(resp.response.candidates[0].citationMetadata, 'yoyoyo');
       expect(
-          resp.response.candidates[0].citationMetadata?.citationSources.length)
-          .toEqual(TEST_MODEL_RESPONSE.candidates[0]
-              .citationMetadata.citationSources.length);
+        resp.response.candidates[0].citationMetadata?.citationSources.length
+      ).toEqual(
+        TEST_MODEL_RESPONSE.candidates[0].citationMetadata.citationSources
+          .length
+      );
     });
   });
 
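
One last detail from the final hunk: the optional chain citationMetadata?.citationSources.length short-circuits to undefined instead of throwing when a candidate carries no citation metadata, so a failing run reports a readable mismatch rather than a TypeError. A tiny illustration with a pared-down candidate shape (hypothetical, for demonstration only):

interface Candidate {
  citationMetadata?: {citationSources: {uri: string}[]};
}

const cited: Candidate = {
  citationMetadata: {citationSources: [{uri: 'https://example.com'}]},
};
const uncited: Candidate = {};

console.log(cited.citationMetadata?.citationSources.length);   // 1
console.log(uncited.citationMetadata?.citationSources.length); // undefined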