@@ -233,23 +233,84 @@ def test_aof_rewrite_tf_model(self):

        # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
        self.env.restartAndReload()
-        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout = con.execute_command("AI.MODELGET", key_name, "META")
-        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs], [b"TF", b"CPU", b"TF_GRAPH", 4, 2, 1000, [b"a", b"b"], [b"mul"]])
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"TF", b"CPU", b"TF_GRAPH", 4, 2, 1000, [b"a", b"b"], [b"mul"]])
        tf_model_run(self.env, key_name)

+        # Reinsert the model (without minbatchtimeout)
+        con.execute_command('AI.MODELSTORE', key_name, 'TF', 'CPU', 'TAG', 'TF_GRAPH1', 'batchsize', 4, 'minbatchsize', 2,
+                            'INPUTS', 2, 'a', 'b', 'OUTPUTS', 1, 'mul', 'BLOB', tf_model)
+        # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"TF", b"CPU", b"TF_GRAPH1", 4, 2, 0, [b"a", b"b"], [b"mul"]])
+
+        # Reinsert the model (without minbatch)
+        con.execute_command('AI.MODELSTORE', key_name, 'TF', 'CPU', 'TAG', 'TF_GRAPH2', 'batchsize', 4,
+                            'INPUTS', 2, 'a', 'b', 'OUTPUTS', 1, 'mul', 'BLOB', tf_model)
+        # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"TF", b"CPU", b"TF_GRAPH2", 4, 0, 0, [b"a", b"b"], [b"mul"]])
+
+        # Reinsert the model (without batching)
+        con.execute_command('AI.MODELSTORE', key_name, 'TF', 'CPU', 'TAG', 'TF_GRAPH3',
+                            'INPUTS', 2, 'a', 'b', 'OUTPUTS', 1, 'mul', 'BLOB', tf_model)
+        # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"TF", b"CPU", b"TF_GRAPH3", 0, 0, 0, [b"a", b"b"], [b"mul"]])
+
    def test_aof_rewrite_torch_model(self):
        key_name = "pt-minimal{1}"
        con = self.env.getConnection()
        torch_model = load_file_content("pt-minimal.pt")
        con.execute_command('AI.MODELSTORE', key_name, 'TORCH', 'CPU', 'TAG', 'PT_MINIMAL', 'batchsize', 4, 'minbatchsize', 2,
-                            'BLOB', torch_model)
+                            'minbatchtimeout', 1000, 'BLOB', torch_model)

        # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
        self.env.restartAndReload()
-        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout = con.execute_command("AI.MODELGET", key_name, "META")
-        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs], [b"TORCH", b"CPU", b"PT_MINIMAL", 4, 2, 0, [b"a", b"b"], [b'']])
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"TORCH", b"CPU", b"PT_MINIMAL", 4, 2, 1000, [b"a", b"b"], [b'']])
        torch_model_run(self.env, key_name)

+        # Reinsert the model (without minbatchtimeout)
+        con.execute_command('AI.MODELSTORE', key_name, 'TORCH', 'CPU', 'TAG', 'PT_MINIMAL1', 'batchsize', 4, 'minbatchsize', 2,
+                            'BLOB', torch_model)
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"TORCH", b"CPU", b"PT_MINIMAL1", 4, 2, 0, [b"a", b"b"], [b'']])
+
+        # Reinsert the model (without minbatch)
+        con.execute_command('AI.MODELSTORE', key_name, 'TORCH', 'CPU', 'TAG', 'PT_MINIMAL2', 'batchsize', 4,
+                            'BLOB', torch_model)
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"TORCH", b"CPU", b"PT_MINIMAL2", 4, 0, 0, [b"a", b"b"], [b'']])
+
+        # Reinsert the model (without batching)
+        con.execute_command('AI.MODELSTORE', key_name, 'TORCH', 'CPU', 'TAG', 'PT_MINIMAL3',
+                            'BLOB', torch_model)
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"TORCH", b"CPU", b"PT_MINIMAL3", 0, 0, 0, [b"a", b"b"], [b'']])
+
    def test_aof_rewrite_troch_script(self):
        key_name = "torch_script{1}"
        con = self.env.getConnection()
@@ -266,13 +327,43 @@ def test_aof_rewrite_onnx_model(self):
        key_name = "linear_iris{1}"
        con = self.env.getConnection()
        onnx_model = load_file_content("linear_iris.onnx")
-        con.execute_command('AI.MODELSTORE', key_name, 'ONNX', 'CPU', 'TAG', 'ONNX_LINEAR_IRIS', 'batchsize', 4, 'BLOB', onnx_model)
+        con.execute_command('AI.MODELSTORE', key_name, 'ONNX', 'CPU', 'TAG', 'ONNX_LINEAR_IRIS', 'batchsize', 4, 'minbatchsize', 2,
+                            'minbatchtimeout', 1000, 'BLOB', onnx_model)
        # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
        self.env.restartAndReload()
-        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout = con.execute_command("AI.MODELGET", key_name, "META")
-        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs], [b"ONNX", b"CPU", b"ONNX_LINEAR_IRIS", 4, 0, 0, [b'float_input'], [b'variable']])
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"ONNX", b"CPU", b"ONNX_LINEAR_IRIS", 4, 2, 1000, [b'float_input'], [b'variable']])
        onnx_model_run(self.env, key_name)

+        # Reinsert the model (without minbatchtimeout)
+        con.execute_command('AI.MODELSTORE', key_name, 'ONNX', 'CPU', 'TAG', 'ONNX_LINEAR_IRIS1', 'batchsize', 4,
+                            'minbatchsize', 2, 'BLOB', onnx_model)
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"ONNX", b"CPU", b"ONNX_LINEAR_IRIS1", 4, 2, 0, [b'float_input'], [b'variable']])
+
+        # Reinsert the model (without minbatch)
+        con.execute_command('AI.MODELSTORE', key_name, 'ONNX', 'CPU', 'TAG', 'ONNX_LINEAR_IRIS2', 'batchsize', 4,
+                            'BLOB', onnx_model)
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"ONNX", b"CPU", b"ONNX_LINEAR_IRIS2", 4, 0, 0, [b'float_input'], [b'variable']])
+
+        # Reinsert the model (without batching)
+        con.execute_command('AI.MODELSTORE', key_name, 'ONNX', 'CPU', 'TAG', 'ONNX_LINEAR_IRIS3',
+                            'BLOB', onnx_model)
+        self.env.restartAndReload()
+        _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout \
+            = con.execute_command("AI.MODELGET", key_name, "META")
+        self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
+                             [b"ONNX", b"CPU", b"ONNX_LINEAR_IRIS3", 0, 0, 0, [b'float_input'], [b'variable']])
+
    def test_aof_rewrite_tensor(self):
        key_name = "tensor{1}"
        con = self.env.getConnection()
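
For reference, here is a minimal standalone sketch of the check these tests automate, run outside the RLTest harness. It is not part of this PR: it assumes redis-py, a local Redis on localhost:6379 started with the RedisAI module and appendonly yes, an already-stored model under the illustrative key tf-model{1}, and it approximates self.env.restartAndReload() with an explicit BGREWRITEAOF.

# Hedged sketch (not part of this PR): read back AI.MODELGET ... META after an
# AOF rewrite, unpacking the reply the same way the tests above do. Assumes
# redis-py, a RedisAI-enabled server on localhost:6379 with appendonly yes,
# and the illustrative key name 'tf-model{1}'.
import redis

con = redis.Redis(host="localhost", port=6379)

# Force the AOF rewrite callback to run; the test harness gets the same effect,
# plus a reload from the rewritten AOF, via self.env.restartAndReload().
con.execute_command("BGREWRITEAOF")

# The META reply alternates field names and values, which is why the tests
# above assign every other element to backend, device, tag, and so on.
_, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _, inputs, _, outputs, _, minbatchtimeout = \
    con.execute_command("AI.MODELGET", "tf-model{1}", "META")

# For a model stored with batchsize 4, minbatchsize 2, minbatchtimeout 1000,
# these values should survive the rewrite unchanged, which is what the tests assert.
print(backend, device, tag, batchsize, minbatchsize, minbatchtimeout)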