diff --git a/invokeai/app/invocations/flux_lora_loader.py b/invokeai/app/invocations/flux_lora_loader.py
index 3cfbb87851..d9e655a507 100644
--- a/invokeai/app/invocations/flux_lora_loader.py
+++ b/invokeai/app/invocations/flux_lora_loader.py
@@ -32,7 +32,7 @@ class FluxLoRALoaderOutput(BaseInvocationOutput):
     classification=Classification.Prototype,
 )
 class FluxLoRALoaderInvocation(BaseInvocation):
-    """Apply a LoRA model to a FLUX transformer and/or T5 encoder."""
+    """Apply a LoRA model to a FLUX transformer and/or text encoder."""
 
     lora: ModelIdentifierField = InputField(
         description=FieldDescriptions.lora_model, title="LoRA", ui_type=UIType.LoRAModel
diff --git a/invokeai/app/invocations/flux_text_encoder.py b/invokeai/app/invocations/flux_text_encoder.py
index ac70273317..a306a8aa95 100644
--- a/invokeai/app/invocations/flux_text_encoder.py
+++ b/invokeai/app/invocations/flux_text_encoder.py
@@ -93,7 +93,7 @@ def _clip_encode(self, context: InvocationContext) -> torch.Tensor:
             clip_text_encoder_config = clip_text_encoder_info.config
             assert clip_text_encoder_config is not None
 
-            # Apply LoRA models to the T5 encoder.
+            # Apply LoRA models to the CLIP encoder.
             # Note: We apply the LoRA after the transformer has been moved to its target device for faster patching.
             if clip_text_encoder_config.format in [ModelFormat.Diffusers]:
                 # The model is non-quantized, so we can apply the LoRA weights directly into the model.
diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts
index c53c3d56b7..777e7bfd30 100644
--- a/invokeai/frontend/web/src/services/api/schema.ts
+++ b/invokeai/frontend/web/src/services/api/schema.ts
@@ -6397,7 +6397,7 @@ export type components = {
     };
     /**
      * FLUX LoRA
-     * @description Apply a LoRA model to a FLUX transformer and/or T5 encoder.
+     * @description Apply a LoRA model to a FLUX transformer and/or text encoder.
      */
     FluxLoRALoaderInvocation: {
       /**