Skip to content

Commit c0b058d

Browse files
committed
Extend tests for every backend and config.
1 parent be58e71 commit c0b058d

File tree

1 file changed

+99
-8
lines changed

1 file changed

+99
-8
lines changed

tests/flow/test_serializations.py

Lines changed: 99 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -233,23 +233,84 @@ def test_aof_rewrite_tf_model(self):
233233

234234
# Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
235235
self.env.restartAndReload()
236-
_, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _ , inputs, _, outputs, _, minbatchtimeout = con.execute_command("AI.MODELGET", key_name, "META")
237-
self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs], [b"TF", b"CPU", b"TF_GRAPH", 4, 2, 1000, [b"a", b"b"], [b"mul"]])
236+
_, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _ , inputs, _, outputs, _, minbatchtimeout\
237+
= con.execute_command("AI.MODELGET", key_name, "META")
238+
self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
239+
[b"TF", b"CPU", b"TF_GRAPH", 4, 2, 1000, [b"a", b"b"], [b"mul"]])
238240
tf_model_run(self.env, key_name)
239241

242+
# Reinsert the model (without minbatchtimeout)
243+
con.execute_command('AI.MODELSTORE', key_name, 'TF', 'CPU', 'TAG', 'TF_GRAPH1', 'batchsize', 4, 'minbatchsize', 2,
244+
'INPUTS', 2, 'a', 'b', 'OUTPUTS', 1, 'mul', 'BLOB', tf_model)
245+
# Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
246+
self.env.restartAndReload()
247+
_, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _ , inputs, _, outputs, _, minbatchtimeout\
248+
= con.execute_command("AI.MODELGET", key_name, "META")
249+
self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
250+
[b"TF", b"CPU", b"TF_GRAPH1", 4, 2, 0, [b"a", b"b"], [b"mul"]])
251+
252+
# Reinsert the model (without minbatch)
253+
con.execute_command('AI.MODELSTORE', key_name, 'TF', 'CPU', 'TAG', 'TF_GRAPH2', 'batchsize', 4,
254+
'INPUTS', 2, 'a', 'b', 'OUTPUTS', 1, 'mul', 'BLOB', tf_model)
255+
# Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
256+
self.env.restartAndReload()
257+
_, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _ , inputs, _, outputs, _, minbatchtimeout \
258+
= con.execute_command("AI.MODELGET", key_name, "META")
259+
self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
260+
[b"TF", b"CPU", b"TF_GRAPH2", 4, 0, 0, [b"a", b"b"], [b"mul"]])
261+
262+
# Reinsert the model (without batching)
263+
con.execute_command('AI.MODELSTORE', key_name, 'TF', 'CPU', 'TAG', 'TF_GRAPH3',
264+
'INPUTS', 2, 'a', 'b', 'OUTPUTS', 1, 'mul', 'BLOB', tf_model)
265+
# Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
266+
self.env.restartAndReload()
267+
_, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _ , inputs, _, outputs, _, minbatchtimeout \
268+
= con.execute_command("AI.MODELGET", key_name, "META")
269+
self.env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
270+
[b"TF", b"CPU", b"TF_GRAPH3", 0, 0, 0, [b"a", b"b"], [b"mul"]])
271+
240272
def test_aof_rewrite_torch_model(self):
    """A TORCH model must survive an AOF rewrite under every batching configuration.

    Stores the model four times (full batching config, then dropping
    minbatchtimeout, minbatchsize and batchsize in turn), forces an AOF
    rewrite + reload each time, and checks the reloaded metadata.
    """
    key_name = "pt-minimal{1}"
    con = self.env.getConnection()
    torch_model = load_file_content("pt-minimal.pt")

    def check_meta(expected):
        # AI.MODELGET META replies with alternating entries; the original
        # test discards the even positions (presumably field names) and
        # keeps the values at the odd ones — do the same by index.
        reply = con.execute_command("AI.MODELGET", key_name, "META")
        backend, device, tag = reply[1], reply[3], reply[5]
        batchsize, minbatchsize = reply[7], reply[9]
        inputs, outputs, minbatchtimeout = reply[11], reply[13], reply[15]
        self.env.assertEqual(
            [backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
            expected)

    # Full batching config: batchsize, minbatchsize and minbatchtimeout.
    con.execute_command('AI.MODELSTORE', key_name, 'TORCH', 'CPU', 'TAG', 'PT_MINIMAL', 'batchsize', 4, 'minbatchsize', 2,
                        'minbatchtimeout', 1000, 'BLOB', torch_model)
    # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
    self.env.restartAndReload()
    check_meta([b"TORCH", b"CPU", b"PT_MINIMAL", 4, 2, 1000, [b"a", b"b"], [b'']])
    torch_model_run(self.env, key_name)

    # Reinsert the model (without minbatchtimeout)
    con.execute_command('AI.MODELSTORE', key_name, 'TORCH', 'CPU', 'TAG', 'PT_MINIMAL1', 'batchsize', 4, 'minbatchsize', 2,
                        'BLOB', torch_model)
    self.env.restartAndReload()
    check_meta([b"TORCH", b"CPU", b"PT_MINIMAL1", 4, 2, 0, [b"a", b"b"], [b'']])

    # Reinsert the model (without minbatch)
    con.execute_command('AI.MODELSTORE', key_name, 'TORCH', 'CPU', 'TAG', 'PT_MINIMAL2', 'batchsize', 4,
                        'BLOB', torch_model)
    self.env.restartAndReload()
    check_meta([b"TORCH", b"CPU", b"PT_MINIMAL2", 4, 0, 0, [b"a", b"b"], [b'']])

    # Reinsert the model (without batching)
    con.execute_command('AI.MODELSTORE', key_name, 'TORCH', 'CPU', 'TAG', 'PT_MINIMAL3',
                        'BLOB', torch_model)
    self.env.restartAndReload()
    check_meta([b"TORCH", b"CPU", b"PT_MINIMAL3", 0, 0, 0, [b"a", b"b"], [b'']])
253314
def test_aof_rewrite_troch_script(self):
254315
key_name = "torch_script{1}"
255316
con = self.env.getConnection()
def test_aof_rewrite_onnx_model(self):
    """An ONNX model must survive an AOF rewrite under every batching configuration.

    Stores the model four times (full batching config, then dropping
    minbatchtimeout, minbatchsize and batchsize in turn), forces an AOF
    rewrite + reload each time, and checks the reloaded metadata.
    """
    key_name = "linear_iris{1}"
    con = self.env.getConnection()
    onnx_model = load_file_content("linear_iris.onnx")

    def check_meta(expected):
        # AI.MODELGET META replies with alternating entries; the original
        # test discards the even positions (presumably field names) and
        # keeps the values at the odd ones — do the same by index.
        reply = con.execute_command("AI.MODELGET", key_name, "META")
        backend, device, tag = reply[1], reply[3], reply[5]
        batchsize, minbatchsize = reply[7], reply[9]
        inputs, outputs, minbatchtimeout = reply[11], reply[13], reply[15]
        self.env.assertEqual(
            [backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
            expected)

    # Full batching config: batchsize, minbatchsize and minbatchtimeout.
    con.execute_command('AI.MODELSTORE', key_name, 'ONNX', 'CPU', 'TAG', 'ONNX_LINEAR_IRIS', 'batchsize', 4, 'minbatchsize', 2,
                        'minbatchtimeout', 1000, 'BLOB', onnx_model)
    # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
    self.env.restartAndReload()
    check_meta([b"ONNX", b"CPU", b"ONNX_LINEAR_IRIS", 4, 2, 1000, [b'float_input'], [b'variable']])
    onnx_model_run(self.env, key_name)

    # Reinsert the model (without minbatchtimeout)
    con.execute_command('AI.MODELSTORE', key_name, 'ONNX', 'CPU', 'TAG', 'ONNX_LINEAR_IRIS1', 'batchsize', 4,
                        'minbatchsize', 2, 'BLOB', onnx_model)
    self.env.restartAndReload()
    check_meta([b"ONNX", b"CPU", b"ONNX_LINEAR_IRIS1", 4, 2, 0, [b'float_input'], [b'variable']])

    # Reinsert the model (without minbatch)
    con.execute_command('AI.MODELSTORE', key_name, 'ONNX', 'CPU', 'TAG', 'ONNX_LINEAR_IRIS2', 'batchsize', 4,
                        'BLOB', onnx_model)
    self.env.restartAndReload()
    check_meta([b"ONNX", b"CPU", b"ONNX_LINEAR_IRIS2", 4, 0, 0, [b'float_input'], [b'variable']])

    # Reinsert the model (without batching)
    con.execute_command('AI.MODELSTORE', key_name, 'ONNX', 'CPU', 'TAG', 'ONNX_LINEAR_IRIS3',
                        'BLOB', onnx_model)
    self.env.restartAndReload()
    check_meta([b"ONNX", b"CPU", b"ONNX_LINEAR_IRIS3", 0, 0, 0, [b'float_input'], [b'variable']])
276367
def test_aof_rewrite_tensor(self):
277368
key_name = "tensor{1}"
278369
con = self.env.getConnection()

0 commit comments

Comments
 (0)