"
]
},
"metadata": {},
"output_type": "display_data"
},
{
- "data": {
- "text/html": [
- "\n"
- ],
- "text/plain": []
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stdout",
+ "name": "stderr",
"output_type": "stream",
"text": [
- "β
Weights compression finished\n",
- "β Convert Image embedding model\n",
- "β Weights compression with int4_asym mode started\n",
- "INFO:nncf:Statistics of the bitwidth distribution:\n",
- "ββββββββββββββββββ―ββββββββββββββββββββββββββββββ―βββββββββββββββββββββββββββββββββββββββββ\n",
- "β Num bits (N) β % all parameters (layers) β % ratio-defining parameters (layers) β\n",
- "ββββββββββββββββββΏββββββββββββββββββββββββββββββΏβββββββββββββββββββββββββββββββββββββββββ₯\n",
- "β 8 β 3% (1 / 130) β 0% (0 / 129) β\n",
- "ββββββββββββββββββΌββββββββββββββββββββββββββββββΌβββββββββββββββββββββββββββββββββββββββββ€\n",
- "β 4 β 97% (129 / 130) β 100% (129 / 129) β\n",
- "ββββββββββββββββββ·ββββββββββββββββββββββββββββββ·βββββββββββββββββββββββββββββββββββββββββ\n"
+ "2024-12-24 18:27:51.174286: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
+ "2024-12-24 18:27:51.186686: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:477] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
+ "WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n",
+ "E0000 00:00:1735050471.201093 340500 cuda_dnn.cc:8310] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
+ "E0000 00:00:1735050471.205249 340500 cuda_blas.cc:1418] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
+ "2024-12-24 18:27:51.219846: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
+ "To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
+ "Downloading shards: 100%|ββββββββββ| 2/2 [00:00<00:00, 2.73it/s]\n",
+ "`Qwen2VLRotaryEmbedding` can now be fully parameterized by passing the model config through the `config` argument. All other arguments will be removed in v4.46\n",
+ "Loading checkpoint shards: 100%|ββββββββββ| 2/2 [00:02<00:00, 1.46s/it]\n",
+ "`loss_type=None` was set in the config but it is unrecognised.Using the default loss: `ForCausalLMLoss`.\n",
+ "/home/ea/work/py311/lib/python3.11/site-packages/transformers/cache_utils.py:458: TracerWarning: Using len to get tensor shape might cause the trace to be incorrect. Recommended usage would be tensor.shape[0]. Passing a tensor of different shape might lead to errors or silently give incorrect results.\n",
+ " or len(self.key_cache[layer_idx]) == 0 # the layer has no cache\n",
+ "/home/ea/work/py311/lib/python3.11/site-packages/transformers/modeling_attn_mask_utils.py:281: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
+ " elif sliding_window is None or key_value_length < sliding_window:\n",
+ "/home/ea/work/py311/lib/python3.11/site-packages/transformers/models/qwen2_vl/modeling_qwen2_vl.py:1329: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
+ " if attention_mask.shape[-1] > target_length:\n",
+ "/home/ea/work/py311/lib/python3.11/site-packages/transformers/cache_utils.py:443: TracerWarning: Using len to get tensor shape might cause the trace to be incorrect. Recommended usage would be tensor.shape[0]. Passing a tensor of different shape might lead to errors or silently give incorrect results.\n",
+ " elif len(self.key_cache[layer_idx]) == 0: # fills previously skipped layers; checking for tensor causes errors\n"
]
},
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "e7e227188f8041f2a446ff3a1159261c",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "Output()"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/html": [
- "\n"
- ],
- "text/plain": []
- },
- "metadata": {},
- "output_type": "display_data"
- },
{
"name": "stdout",
"output_type": "stream",
"text": [
- "β
Weights compression finished\n",
- "β
Image embedding model successfully converted\n",
- "β
Qwen/Qwen2-VL-7B-Instruct model conversion finished. You can find results in Qwen2-VL-7B-Instruct\n"
+ "INFO:nncf:Statistics of the bitwidth distribution:\n",
+ "βββββββββββββββββββββββββββββ―ββββββββββββββββββββββββββββββ―βββββββββββββββββββββββββββββββββββββββββ\n",
+ "β Weight compression mode β % all parameters (layers) β % ratio-defining parameters (layers) β\n",
+ "βββββββββββββββββββββββββββββΏββββββββββββββββββββββββββββββΏβββββββββββββββββββββββββββββββββββββββββ₯\n",
+ "β int8_asym β 15% (1 / 197) β 0% (0 / 196) β\n",
+ "βββββββββββββββββββββββββββββΌββββββββββββββββββββββββββββββΌβββββββββββββββββββββββββββββββββββββββββ€\n",
+ "β int4_asym β 85% (196 / 197) β 100% (196 / 196) β\n",
+ "βββββββββββββββββββββββββββββ·ββββββββββββββββββββββββββββββ·βββββββββββββββββββββββββββββββββββββββββ\n",
+ "\u001b[2KApplying Weight Compression \u001b[38;2;114;156;31mβββββββββββββββββββββββββββ\u001b[0m \u001b[35m100%\u001b[0m β’ \u001b[38;2;0;104;181m0:00:45\u001b[0m β’ \u001b[38;2;0;104;181m0:00:00\u001b[0m;0;104;181m0:00:01\u001b[0m181m0:00:02\u001b[0m\n",
+ "\u001b[?25hINFO:nncf:Statistics of the bitwidth distribution:\n",
+ "βββββββββββββββββββββββββββββ―ββββββββββββββββββββββββββββββ―βββββββββββββββββββββββββββββββββββββββββ\n",
+ "β Weight compression mode β % all parameters (layers) β % ratio-defining parameters (layers) β\n",
+ "βββββββββββββββββββββββββββββΏββββββββββββββββββββββββββββββΏβββββββββββββββββββββββββββββββββββββββββ₯\n",
+ "β int8_sym β 100% (1 / 1) β 100% (1 / 1) β\n",
+ "βββββββββββββββββββββββββββββ·ββββββββββββββββββββββββββββββ·βββββββββββββββββββββββββββββββββββββββββ\n",
+ "\u001b[2KApplying Weight Compression \u001b[38;2;114;156;31mβββββββββββββββββββββββββββ\u001b[0m \u001b[35m100%\u001b[0m β’ \u001b[38;2;0;104;181m0:00:00\u001b[0m β’ \u001b[38;2;0;104;181m0:00:00\u001b[0m\n",
+ "\u001b[?25hINFO:nncf:Statistics of the bitwidth distribution:\n",
+ "βββββββββββββββββββββββββββββ―ββββββββββββββββββββββββββββββ―βββββββββββββββββββββββββββββββββββββββββ\n",
+ "β Weight compression mode β % all parameters (layers) β % ratio-defining parameters (layers) β\n",
+ "βββββββββββββββββββββββββββββΏββββββββββββββββββββββββββββββΏβββββββββββββββββββββββββββββββββββββββββ₯\n",
+ "β int8_sym β 100% (1 / 1) β 100% (1 / 1) β\n",
+ "βββββββββββββββββββββββββββββ·ββββββββββββββββββββββββββββββ·βββββββββββββββββββββββββββββββββββββββββ\n",
+ "\u001b[2KApplying Weight Compression \u001b[38;2;114;156;31mβββββββββββββββββββββββββββ\u001b[0m \u001b[35m100%\u001b[0m β’ \u001b[38;2;0;104;181m0:00:01\u001b[0m β’ \u001b[38;2;0;104;181m0:00:00\u001b[0m\n",
+ "\u001b[?25hINFO:nncf:Statistics of the bitwidth distribution:\n",
+ "βββββββββββββββββββββββββββββ―ββββββββββββββββββββββββββββββ―βββββββββββββββββββββββββββββββββββββββββ\n",
+ "β Weight compression mode β % all parameters (layers) β % ratio-defining parameters (layers) β\n",
+ "βββββββββββββββββββββββββββββΏββββββββββββββββββββββββββββββΏβββββββββββββββββββββββββββββββββββββββββ₯\n",
+ "β int8_sym β 100% (130 / 130) β 100% (130 / 130) β\n",
+ "βββββββββββββββββββββββββββββ·ββββββββββββββββββββββββββββββ·βββββββββββββββββββββββββββββββββββββββββ\n",
+ "\u001b[2KApplying Weight Compression \u001b[38;2;114;156;31mβββββββββββββββββββββββββββ\u001b[0m \u001b[35m100%\u001b[0m β’ \u001b[38;2;0;104;181m0:00:03\u001b[0m β’ \u001b[38;2;0;104;181m0:00:00\u001b[0m02\u001b[0m β’ \u001b[38;2;0;104;181m0:00:01\u001b[0m\n",
+ "\u001b[?25h"
]
}
],
"source": [
- "import nncf\n",
+ "from cmd_helper import optimum_cli\n",
"\n",
- "compression_configuration = {\n",
- " \"mode\": nncf.CompressWeightsMode.INT4_ASYM,\n",
- " \"group_size\": 128,\n",
- " \"ratio\": 1.0,\n",
- "}\n",
- "\n",
- "convert_qwen2vl_model(pt_model_id, model_dir, compression_configuration)"
+ "if not (model_dir / \"INT4\").exists():\n",
+ " optimum_cli(pt_model_id, model_dir / \"INT4\", additional_args={\"weight-format\": \"int4\"})"
]
},
{
@@ -400,19 +310,30 @@
"## Prepare model inference pipeline\n",
"[back to top β¬οΈ](#Table-of-contents:)\n",
"\n",
- "As discussed, the model comprises Image Encoder and LLM (with separated text embedding part) that generates answer. In `ov_qwen2_vl.py` we defined inference class `OVQwen2VLModel` that will represent generation cycle, It is based on [HuggingFace Transformers `GenerationMixin`](https://huggingface.co/docs/transformers/main_classes/text_generation) and looks similar to [Optimum Intel](https://huggingface.co/docs/optimum/intel/index) `OVModelForCausalLM` that is used for LLM inference. "
+ "OpenVINO integration with Optimum Intel provides ready-to-use API for model inference that can be used for smooth integration with transformers-based solutions. For loading model, we will use `OVModelForVisualCausalLM` class that have compatible interface with Transformers LLaVA implementation. For loading a model, `from_pretrained` method should be used. It accepts path to the model directory or model_id from HuggingFace hub (if model is not converted to OpenVINO format, conversion will be triggered automatically). Additionally, we can provide an inference device, quantization config (if model has not been quantized yet) and device-specific OpenVINO Runtime configuration. More details about model inference with Optimum Intel can be found in [documentation](https://huggingface.co/docs/optimum/intel/openvino/inference)."
]
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": 6,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "2024-12-24 18:30:03.136274: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
+ "2024-12-24 18:30:03.148865: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:477] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
+ "WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n",
+ "E0000 00:00:1735050603.163311 340474 cuda_dnn.cc:8310] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
+ "E0000 00:00:1735050603.167677 340474 cuda_blas.cc:1418] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
+ "2024-12-24 18:30:03.182551: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
+ "To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
+ ]
+ }
+ ],
"source": [
- "from ov_qwen2_vl import OVQwen2VLModel\n",
- "\n",
- "# Uncomment below lines to see the model inference class code\n",
- "# OVQwen2VLModel??"
+ "from optimum.intel.openvino import OVModelForVisualCausalLM"
]
},
{
@@ -426,13 +347,13 @@
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
- "model_id": "df142541e95b4107b44ee9353f5e503a",
+ "model_id": "f75cab4cce234b378c0f9c5713e8202a",
"version_major": 2,
"version_minor": 0
},
@@ -440,7 +361,7 @@
"Dropdown(description='Device:', index=1, options=('CPU', 'AUTO'), value='AUTO')"
]
},
- "execution_count": 8,
+ "execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -457,9 +378,18 @@
"cell_type": "code",
"execution_count": 9,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Could not infer whether the model was already converted or not to the OpenVINO IR, keeping `export=AUTO`.\n",
+ "unsupported operand type(s) for ^: 'bool' and 'str'\n"
+ ]
+ }
+ ],
"source": [
- "model = OVQwen2VLModel(model_dir, device.value)"
+ "model = OVModelForVisualCausalLM.from_pretrained(model_dir / \"INT4\", device.value)"
]
},
{
@@ -473,7 +403,7 @@
},
{
"cell_type": "code",
- "execution_count": 10,
+ "execution_count": 11,
"metadata": {
"tags": []
},
@@ -489,13 +419,6 @@
"metadata": {},
"output_type": "display_data"
},
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Setting `pad_token_id` to `eos_token_id`:None for open-end generation.\n"
- ]
- },
{
"name": "stdout",
"output_type": "stream",
@@ -503,7 +426,7 @@
"Question:\n",
"Describe this image.\n",
"Answer:\n",
- "The image depicts a serene beach scene at sunset. A woman and her dog are sitting on the sandy shore, enjoying each other's company. The woman is wearing a plaid shirt and has long hair. She is holding the dog's paw, and the dog is wearing a colorful harness. The dog appears to be a large breed, possibly a Labrador Retriever. The ocean is visible in the background, with gentle waves and a clear sky. The sun is setting, casting a warm glow over\n"
+ "The image depicts a woman sitting on a sandy beach with a large dog. The dog is wearing a harness and is sitting on its hind legs, reaching up to give a high-five to the woman. The woman is smiling and appears to be enjoying the moment. The background shows the ocean with gentle waves, and the sky is clear with a soft light, suggesting it might be either sunrise or sunset.\n"
]
}
],
@@ -516,7 +439,7 @@
"\n",
"min_pixels = 256 * 28 * 28\n",
"max_pixels = 1280 * 28 * 28\n",
- "processor = AutoProcessor.from_pretrained(model_dir, min_pixels=min_pixels, max_pixels=max_pixels)\n",
+ "processor = AutoProcessor.from_pretrained(model_dir / \"INT4\", min_pixels=min_pixels, max_pixels=max_pixels)\n",
"\n",
"if processor.chat_template is None:\n",
" tok = AutoTokenizer.from_pretrained(model_dir)\n",
@@ -565,7 +488,7 @@
},
{
"cell_type": "code",
- "execution_count": 11,
+ "execution_count": 12,
"metadata": {
"tags": []
},
@@ -647,13 +570,13 @@
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"state": {
- "0a0a67436c09405a96c4c528eb3b567d": {
+ "31e4430151654c27ab36860fd5e4a4d6": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "2.0.0",
"model_name": "LayoutModel",
"state": {}
},
- "1ba257658c1a426c86e7ea47b1d0b24d": {
+ "3eaf3559d14d4d929f7aef2f3eb18eba": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "2.0.0",
"model_name": "DescriptionStyleModel",
@@ -661,64 +584,7 @@
"description_width": ""
}
},
- "3d1f8206e8434610b41dfe67808c3e3f": {
- "model_module": "@jupyter-widgets/controls",
- "model_module_version": "2.0.0",
- "model_name": "HTMLStyleModel",
- "state": {
- "description_width": "",
- "font_size": null,
- "text_color": null
- }
- },
- "482df1c168634a809d027d3ad3dfd052": {
- "model_module": "@jupyter-widgets/controls",
- "model_module_version": "2.0.0",
- "model_name": "HBoxModel",
- "state": {
- "children": [
- "IPY_MODEL_867aff62ced0486796b586c405175543",
- "IPY_MODEL_ca5bb90900474e4c915db56466f0b901",
- "IPY_MODEL_afde46b2d05742eeb36d29b5748deab4"
- ],
- "layout": "IPY_MODEL_0a0a67436c09405a96c4c528eb3b567d"
- }
- },
- "4d985188bdfa4bb9b4edd6f16f27f7f6": {
- "model_module": "@jupyter-widgets/output",
- "model_module_version": "1.0.0",
- "model_name": "OutputModel",
- "state": {
- "layout": "IPY_MODEL_7e96a0c1f95c473492390c49be4df004",
- "outputs": [
- {
- "data": {
- "text/html": "Applying Weight Compression ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ 100% β’ 0:04:03 β’ 0:00:00\n
\n",
- "text/plain": "Applying Weight Compression \u001b[38;2;114;156;31mββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[35m100%\u001b[0m β’ \u001b[38;2;0;104;181m0:04:03\u001b[0m β’ \u001b[38;2;0;104;181m0:00:00\u001b[0m\n"
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ]
- }
- },
- "4e3c73a27d954a848390032bf478f304": {
- "model_module": "@jupyter-widgets/controls",
- "model_module_version": "2.0.0",
- "model_name": "HTMLStyleModel",
- "state": {
- "description_width": "",
- "font_size": null,
- "text_color": null
- }
- },
- "57f1bfcfb2f14051be1b593be91a1538": {
- "model_module": "@jupyter-widgets/base",
- "model_module_version": "2.0.0",
- "model_name": "LayoutModel",
- "state": {}
- },
- "6b39e80a0c7d4910ae9e186f28693c6b": {
+ "9b6a9892f0e842168ac2c1290377a6b1": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "2.0.0",
"model_name": "DropdownModel",
@@ -728,90 +594,26 @@
"Qwen/Qwen2-VL-7B-Instruct"
],
"description": "Model:",
- "index": 1,
- "layout": "IPY_MODEL_9aa0c70f4a4b43f78ef0cc0459bae6c0",
- "style": "IPY_MODEL_d4ad73ce65e84ea3bb3c63b8c6323c35"
+ "index": 0,
+ "layout": "IPY_MODEL_ec8f62dd8a8440c3b38810df8e4e8526",
+ "style": "IPY_MODEL_3eaf3559d14d4d929f7aef2f3eb18eba"
}
},
- "7635d38c01bd47cea7ba1ef264e30ec8": {
- "model_module": "@jupyter-widgets/base",
- "model_module_version": "2.0.0",
- "model_name": "LayoutModel",
- "state": {}
- },
- "7941cc5ffde44b31b16e51b5eac07706": {
- "model_module": "@jupyter-widgets/base",
- "model_module_version": "2.0.0",
- "model_name": "LayoutModel",
- "state": {}
- },
- "7e96a0c1f95c473492390c49be4df004": {
- "model_module": "@jupyter-widgets/base",
- "model_module_version": "2.0.0",
- "model_name": "LayoutModel",
- "state": {}
- },
- "85defe60aba94ede94d1b7329f10f993": {
+ "d850a13d24cd4471bcd439711d8d1bfb": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "2.0.0",
- "model_name": "ProgressStyleModel",
+ "model_name": "DescriptionStyleModel",
"state": {
"description_width": ""
}
},
- "867aff62ced0486796b586c405175543": {
- "model_module": "@jupyter-widgets/controls",
- "model_module_version": "2.0.0",
- "model_name": "HTMLModel",
- "state": {
- "layout": "IPY_MODEL_e1c3bd6ce8184a4cbbcf80004a3ffb6e",
- "style": "IPY_MODEL_3d1f8206e8434610b41dfe67808c3e3f",
- "value": "Loadingβcheckpointβshards:β100%"
- }
- },
- "9aa0c70f4a4b43f78ef0cc0459bae6c0": {
+ "ec8f62dd8a8440c3b38810df8e4e8526": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "2.0.0",
"model_name": "LayoutModel",
"state": {}
},
- "a39b4438d9de4661843c27b86c7e93db": {
- "model_module": "@jupyter-widgets/base",
- "model_module_version": "2.0.0",
- "model_name": "LayoutModel",
- "state": {}
- },
- "afde46b2d05742eeb36d29b5748deab4": {
- "model_module": "@jupyter-widgets/controls",
- "model_module_version": "2.0.0",
- "model_name": "HTMLModel",
- "state": {
- "layout": "IPY_MODEL_57f1bfcfb2f14051be1b593be91a1538",
- "style": "IPY_MODEL_4e3c73a27d954a848390032bf478f304",
- "value": "β5/5β[00:01<00:00,ββ2.93it/s]"
- }
- },
- "ca5bb90900474e4c915db56466f0b901": {
- "model_module": "@jupyter-widgets/controls",
- "model_module_version": "2.0.0",
- "model_name": "FloatProgressModel",
- "state": {
- "bar_style": "success",
- "layout": "IPY_MODEL_7635d38c01bd47cea7ba1ef264e30ec8",
- "max": 5,
- "style": "IPY_MODEL_85defe60aba94ede94d1b7329f10f993",
- "value": 5
- }
- },
- "d4ad73ce65e84ea3bb3c63b8c6323c35": {
- "model_module": "@jupyter-widgets/controls",
- "model_module_version": "2.0.0",
- "model_name": "DescriptionStyleModel",
- "state": {
- "description_width": ""
- }
- },
- "df142541e95b4107b44ee9353f5e503a": {
+ "f75cab4cce234b378c0f9c5713e8202a": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "2.0.0",
"model_name": "DropdownModel",
@@ -822,32 +624,8 @@
],
"description": "Device:",
"index": 1,
- "layout": "IPY_MODEL_a39b4438d9de4661843c27b86c7e93db",
- "style": "IPY_MODEL_1ba257658c1a426c86e7ea47b1d0b24d"
- }
- },
- "e1c3bd6ce8184a4cbbcf80004a3ffb6e": {
- "model_module": "@jupyter-widgets/base",
- "model_module_version": "2.0.0",
- "model_name": "LayoutModel",
- "state": {}
- },
- "e7e227188f8041f2a446ff3a1159261c": {
- "model_module": "@jupyter-widgets/output",
- "model_module_version": "1.0.0",
- "model_name": "OutputModel",
- "state": {
- "layout": "IPY_MODEL_7941cc5ffde44b31b16e51b5eac07706",
- "outputs": [
- {
- "data": {
- "text/html": "Applying Weight Compression ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ 100% β’ 0:00:20 β’ 0:00:00\n
\n",
- "text/plain": "Applying Weight Compression \u001b[38;2;114;156;31mββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[35m100%\u001b[0m β’ \u001b[38;2;0;104;181m0:00:20\u001b[0m β’ \u001b[38;2;0;104;181m0:00:00\u001b[0m\n"
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ]
+ "layout": "IPY_MODEL_31e4430151654c27ab36860fd5e4a4d6",
+ "style": "IPY_MODEL_d850a13d24cd4471bcd439711d8d1bfb"
}
}
},