
Commit 75ada25

Harmonize HF environment variables + deprecate use_auth_token (#6066)
* Harmonize HF environment variables + deprecate use_auth_token
* fix import
* fix
1 parent 2243a59

30 files changed, +235 -239 lines

docs/source/en/using-diffusers/push_to_hub.md

+1 -7

@@ -174,10 +174,4 @@ Set `private=True` in the [`~diffusers.utils.PushToHubMixin.push_to_hub`] function:
 controlnet.push_to_hub("my-controlnet-model-private", private=True)
 ```
 
-Private repositories are only visible to you, and other users won't be able to clone the repository and your repository won't appear in search results. Even if a user has the URL to your private repository, they'll receive a `404 - Sorry, we can't find the page you are looking for.`
-
-To load a model, scheduler, or pipeline from private or gated repositories, set `use_auth_token=True`:
-
-```py
-model = ControlNetModel.from_pretrained("your-namespace/my-controlnet-model-private", use_auth_token=True)
-```
+Private repositories are only visible to you, and other users won't be able to clone the repository and your repository won't appear in search results. Even if a user has the URL to your private repository, they'll receive a `404 - Sorry, we can't find the page you are looking for`. You must be [logged in](https://huggingface.co/docs/huggingface_hub/quick-start#login) to load a model from a private repository.
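With `use_auth_token=True` gone from the docs, access to private or gated repositories goes through the regular `huggingface_hub` login instead. A minimal sketch of the new flow (the repository name is a placeholder, not a real repo):

```python
from huggingface_hub import login

from diffusers import ControlNetModel

# Authenticate once; the token is cached locally and picked up by later downloads.
login()  # or set the HF_TOKEN environment variable

# No use_auth_token argument anymore; an explicit token="hf_..." kwarg still works if needed.
model = ControlNetModel.from_pretrained("your-namespace/my-controlnet-model-private")
```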

examples/community/README.md

-4

@@ -512,7 +512,6 @@ device = torch.device('cpu' if not has_cuda else 'cuda')
 pipe = DiffusionPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
     safety_checker=None,
-    use_auth_token=True,
     custom_pipeline="imagic_stable_diffusion",
     scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
 ).to(device)
@@ -552,7 +551,6 @@ device = th.device('cpu' if not has_cuda else 'cuda')
 
 pipe = DiffusionPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
-    use_auth_token=True,
     custom_pipeline="seed_resize_stable_diffusion"
 ).to(device)
 
@@ -588,7 +586,6 @@ generator = th.Generator("cuda").manual_seed(0)
 
 pipe = DiffusionPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
-    use_auth_token=True,
     custom_pipeline="/home/mark/open_source/diffusers/examples/community/"
 ).to(device)
 
@@ -607,7 +604,6 @@ image.save('./seed_resize/seed_resize_{w}_{h}_image.png'.format(w=width, h=height))
 
 pipe_compare = DiffusionPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
-    use_auth_token=True,
     custom_pipeline="/home/mark/open_source/diffusers/examples/community/"
 ).to(device)

examples/community/checkpoint_merger.py

+8 -6

@@ -5,10 +5,11 @@
 import safetensors.torch
 import torch
 from huggingface_hub import snapshot_download
+from huggingface_hub.utils import validate_hf_hub_args
 
 from diffusers import DiffusionPipeline, __version__
 from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
-from diffusers.utils import CONFIG_NAME, DIFFUSERS_CACHE, ONNX_WEIGHTS_NAME, WEIGHTS_NAME
+from diffusers.utils import CONFIG_NAME, ONNX_WEIGHTS_NAME, WEIGHTS_NAME
 
 
 class CheckpointMergerPipeline(DiffusionPipeline):
@@ -57,6 +58,7 @@ def _remove_meta_keys(self, config_dict: Dict):
         return (temp_dict, meta_keys)
 
     @torch.no_grad()
+    @validate_hf_hub_args
     def merge(self, pretrained_model_name_or_path_list: List[Union[str, os.PathLike]], **kwargs):
         """
         Returns a new pipeline object of the class 'DiffusionPipeline' with the merged checkpoints(weights) of the models passed
@@ -69,7 +71,7 @@ def merge(self, pretrained_model_name_or_path_list: List[Union[str, os.PathLike]
             **kwargs:
                 Supports all the default DiffusionPipeline.get_config_dict kwargs viz..
 
-                cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map.
+                cache_dir, resume_download, force_download, proxies, local_files_only, token, revision, torch_dtype, device_map.
 
             alpha - The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha
                 would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2
@@ -81,12 +83,12 @@ def merge(self, pretrained_model_name_or_path_list: List[Union[str, os.PathLike]
 
         """
         # Default kwargs from DiffusionPipeline
-        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
+        cache_dir = kwargs.pop("cache_dir", None)
         resume_download = kwargs.pop("resume_download", False)
         force_download = kwargs.pop("force_download", False)
         proxies = kwargs.pop("proxies", None)
         local_files_only = kwargs.pop("local_files_only", False)
-        use_auth_token = kwargs.pop("use_auth_token", None)
+        token = kwargs.pop("token", None)
         revision = kwargs.pop("revision", None)
         torch_dtype = kwargs.pop("torch_dtype", None)
         device_map = kwargs.pop("device_map", None)
@@ -123,7 +125,7 @@ def merge(self, pretrained_model_name_or_path_list: List[Union[str, os.PathLike]
                 force_download=force_download,
                 proxies=proxies,
                 local_files_only=local_files_only,
-                use_auth_token=use_auth_token,
+                token=token,
                 revision=revision,
             )
             config_dicts.append(config_dict)
@@ -159,7 +161,7 @@ def merge(self, pretrained_model_name_or_path_list: List[Union[str, os.PathLike]
                     resume_download=resume_download,
                     proxies=proxies,
                     local_files_only=local_files_only,
-                    use_auth_token=use_auth_token,
+                    token=token,
                     revision=revision,
                     allow_patterns=allow_patterns,
                     user_agent=user_agent,
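The new `validate_hf_hub_args` decorator is what keeps the old keyword working during the transition: as I understand it, it warns when a caller still passes `use_auth_token` and forwards the value as `token`. A rough sketch of the pattern on a hypothetical helper (not part of this commit; the repo id and token string are placeholders):

```python
from huggingface_hub.utils import validate_hf_hub_args


@validate_hf_hub_args
def fetch_config(repo_id: str, *, token=None, cache_dir=None, revision=None):
    # token=None falls back to the cached login or the HF_TOKEN environment variable;
    # cache_dir=None falls back to the shared Hub cache (HF_HOME / HF_HUB_CACHE).
    ...


# A caller that still uses the old kwarg should get a deprecation warning, with the value
# forwarded to `token` by the decorator rather than by this sketch.
fetch_config("runwayml/stable-diffusion-v1-5", use_auth_token="hf_xxx")
```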

examples/community/stable_diffusion_tensorrt_img2img.py

+6 -4

@@ -28,6 +28,7 @@
 import tensorrt as trt
 import torch
 from huggingface_hub import snapshot_download
+from huggingface_hub.utils import validate_hf_hub_args
 from onnx import shape_inference
 from polygraphy import cuda
 from polygraphy.backend.common import bytes_from_path
@@ -50,7 +51,7 @@
     StableDiffusionSafetyChecker,
 )
 from diffusers.schedulers import DDIMScheduler
-from diffusers.utils import DIFFUSERS_CACHE, logging
+from diffusers.utils import logging
 
 
 """
@@ -778,12 +779,13 @@ def __loadModels(self):
         self.models["vae_encoder"] = make_VAEEncoder(self.vae, **models_args)
 
     @classmethod
+    @validate_hf_hub_args
     def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
-        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
+        cache_dir = kwargs.pop("cache_dir", None)
         resume_download = kwargs.pop("resume_download", False)
         proxies = kwargs.pop("proxies", None)
         local_files_only = kwargs.pop("local_files_only", False)
-        use_auth_token = kwargs.pop("use_auth_token", None)
+        token = kwargs.pop("token", None)
        revision = kwargs.pop("revision", None)
 
         cls.cached_folder = (
@@ -795,7 +797,7 @@ def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os
                 resume_download=resume_download,
                 proxies=proxies,
                 local_files_only=local_files_only,
-                use_auth_token=use_auth_token,
+                token=token,
                 revision=revision,
             )
         )
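The same substitution runs through all three TensorRT community pipelines: `cache_dir` now defaults to `None` so that `huggingface_hub` resolves the shared cache, and `token` replaces `use_auth_token`. A small sketch of the `snapshot_download` call these pipelines end up making (the repo id is a placeholder):

```python
from huggingface_hub import snapshot_download

# cache_dir=None -> huggingface_hub picks the cache (HF_HUB_CACHE / HF_HOME, else ~/.cache/huggingface/hub)
# token=None     -> falls back to the cached login or the HF_TOKEN environment variable
cached_folder = snapshot_download(
    "stabilityai/stable-diffusion-2-1",
    cache_dir=None,
    token=None,
    revision=None,
)
```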

examples/community/stable_diffusion_tensorrt_inpaint.py

+6 -4

@@ -28,6 +28,7 @@
 import tensorrt as trt
 import torch
 from huggingface_hub import snapshot_download
+from huggingface_hub.utils import validate_hf_hub_args
 from onnx import shape_inference
 from polygraphy import cuda
 from polygraphy.backend.common import bytes_from_path
@@ -51,7 +52,7 @@
 )
 from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import prepare_mask_and_masked_image
 from diffusers.schedulers import DDIMScheduler
-from diffusers.utils import DIFFUSERS_CACHE, logging
+from diffusers.utils import logging
 
 
 """
@@ -779,12 +780,13 @@ def __loadModels(self):
         self.models["vae_encoder"] = make_VAEEncoder(self.vae, **models_args)
 
     @classmethod
+    @validate_hf_hub_args
     def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
-        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
+        cache_dir = kwargs.pop("cache_dir", None)
         resume_download = kwargs.pop("resume_download", False)
         proxies = kwargs.pop("proxies", None)
         local_files_only = kwargs.pop("local_files_only", False)
-        use_auth_token = kwargs.pop("use_auth_token", None)
+        token = kwargs.pop("token", None)
         revision = kwargs.pop("revision", None)
 
         cls.cached_folder = (
@@ -796,7 +798,7 @@ def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os
                 resume_download=resume_download,
                 proxies=proxies,
                 local_files_only=local_files_only,
-                use_auth_token=use_auth_token,
+                token=token,
                 revision=revision,
             )
         )

examples/community/stable_diffusion_tensorrt_txt2img.py

+6 -4

@@ -27,6 +27,7 @@
 import tensorrt as trt
 import torch
 from huggingface_hub import snapshot_download
+from huggingface_hub.utils import validate_hf_hub_args
 from onnx import shape_inference
 from polygraphy import cuda
 from polygraphy.backend.common import bytes_from_path
@@ -49,7 +50,7 @@
     StableDiffusionSafetyChecker,
 )
 from diffusers.schedulers import DDIMScheduler
-from diffusers.utils import DIFFUSERS_CACHE, logging
+from diffusers.utils import logging
 
 
 """
@@ -691,12 +692,13 @@ def __loadModels(self):
         self.models["vae"] = make_VAE(self.vae, **models_args)
 
     @classmethod
+    @validate_hf_hub_args
     def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
-        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
+        cache_dir = kwargs.pop("cache_dir", None)
         resume_download = kwargs.pop("resume_download", False)
         proxies = kwargs.pop("proxies", None)
         local_files_only = kwargs.pop("local_files_only", False)
-        use_auth_token = kwargs.pop("use_auth_token", None)
+        token = kwargs.pop("token", None)
         revision = kwargs.pop("revision", None)
 
         cls.cached_folder = (
@@ -708,7 +710,7 @@ def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os
                 resume_download=resume_download,
                 proxies=proxies,
                 local_files_only=local_files_only,
-                use_auth_token=use_auth_token,
+                token=token,
                 revision=revision,
             )
         )

examples/consistency_distillation/train_lcm_distill_lora_sd_wds.py

+1 -1

@@ -423,7 +423,7 @@ def import_model_class_from_model_name_or_path(
     pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
 ):
     text_encoder_config = PretrainedConfig.from_pretrained(
-        pretrained_model_name_or_path, subfolder=subfolder, revision=revision, use_auth_token=True
+        pretrained_model_name_or_path, subfolder=subfolder, revision=revision
     )
     model_class = text_encoder_config.architectures[0]

examples/consistency_distillation/train_lcm_distill_lora_sdxl_wds.py

+1 -1

@@ -397,7 +397,7 @@ def import_model_class_from_model_name_or_path(
     pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
 ):
     text_encoder_config = PretrainedConfig.from_pretrained(
-        pretrained_model_name_or_path, subfolder=subfolder, revision=revision, use_auth_token=True
+        pretrained_model_name_or_path, subfolder=subfolder, revision=revision
     )
     model_class = text_encoder_config.architectures[0]

examples/consistency_distillation/train_lcm_distill_sd_wds.py

+1 -1

@@ -400,7 +400,7 @@ def import_model_class_from_model_name_or_path(
     pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
 ):
     text_encoder_config = PretrainedConfig.from_pretrained(
-        pretrained_model_name_or_path, subfolder=subfolder, revision=revision, use_auth_token=True
+        pretrained_model_name_or_path, subfolder=subfolder, revision=revision
     )
     model_class = text_encoder_config.architectures[0]

examples/consistency_distillation/train_lcm_distill_sdxl_wds.py

+1 -1

@@ -419,7 +419,7 @@ def import_model_class_from_model_name_or_path(
     pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
 ):
     text_encoder_config = PretrainedConfig.from_pretrained(
-        pretrained_model_name_or_path, subfolder=subfolder, revision=revision, use_auth_token=True
+        pretrained_model_name_or_path, subfolder=subfolder, revision=revision
     )
     model_class = text_encoder_config.architectures[0]

examples/research_projects/controlnet/train_controlnet_webdataset.py

+2 -2

@@ -420,7 +420,7 @@ def import_model_class_from_model_name_or_path(
     pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
 ):
     text_encoder_config = PretrainedConfig.from_pretrained(
-        pretrained_model_name_or_path, subfolder=subfolder, revision=revision, use_auth_token=True
+        pretrained_model_name_or_path, subfolder=subfolder, revision=revision
     )
     model_class = text_encoder_config.architectures[0]
 
@@ -975,7 +975,7 @@ def main(args):
         revision=args.revision,
     )
     unet = UNet2DConditionModel.from_pretrained(
-        args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, use_auth_token=True
+        args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
    )
 
     if args.controlnet_model_name_or_path:
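The training-script edits assume credentials and cache paths now come from the standard Hugging Face environment rather than per-call arguments. A quick, illustrative way to check what that environment resolves to on a given machine:

```python
import os

from huggingface_hub.constants import HF_HUB_CACHE

# Downloads land here now that DIFFUSERS_CACHE is gone: HF_HUB_CACHE / HF_HOME if set,
# otherwise the default ~/.cache/huggingface/hub.
print("cache:", HF_HUB_CACHE)

# Authentication without use_auth_token=True: the HF_TOKEN environment variable,
# or the token stored by `huggingface-cli login`.
print("HF_TOKEN set:", "HF_TOKEN" in os.environ)
```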

src/diffusers/commands/fp16_safetensors.py

+10 -11

@@ -19,6 +19,7 @@
 
 import glob
 import json
+import warnings
 from argparse import ArgumentParser, Namespace
 from importlib import import_module
 
@@ -32,12 +33,12 @@
 
 
 def conversion_command_factory(args: Namespace):
-    return FP16SafetensorsCommand(
-        args.ckpt_id,
-        args.fp16,
-        args.use_safetensors,
-        args.use_auth_token,
-    )
+    if args.use_auth_token:
+        warnings.warn(
+            "The `--use_auth_token` flag is deprecated and will be removed in a future version. Authentication is now"
+            " handled automatically if user is logged in."
+        )
+    return FP16SafetensorsCommand(args.ckpt_id, args.fp16, args.use_safetensors)
 
 
 class FP16SafetensorsCommand(BaseDiffusersCLICommand):
@@ -62,7 +63,7 @@ def register_subcommand(parser: ArgumentParser):
         )
         conversion_parser.set_defaults(func=conversion_command_factory)
 
-    def __init__(self, ckpt_id: str, fp16: bool, use_safetensors: bool, use_auth_token: bool):
+    def __init__(self, ckpt_id: str, fp16: bool, use_safetensors: bool):
         self.logger = logging.get_logger("diffusers-cli/fp16_safetensors")
         self.ckpt_id = ckpt_id
         self.local_ckpt_dir = f"/tmp/{ckpt_id}"
@@ -75,8 +76,6 @@ def __init__(self, ckpt_id: str, fp16: bool, use_safetensors: bool, use_auth_token: bool):
                 "When `use_safetensors` and `fp16` both are False, then this command is of no use."
             )
 
-        self.use_auth_token = use_auth_token
-
     def run(self):
         if version.parse(huggingface_hub.__version__) < version.parse("0.9.0"):
             raise ImportError(
@@ -87,7 +86,7 @@ def run(self):
         from huggingface_hub import create_commit
         from huggingface_hub._commit_api import CommitOperationAdd
 
-        model_index = hf_hub_download(repo_id=self.ckpt_id, filename="model_index.json", token=self.use_auth_token)
+        model_index = hf_hub_download(repo_id=self.ckpt_id, filename="model_index.json")
         with open(model_index, "r") as f:
             pipeline_class_name = json.load(f)["_class_name"]
         pipeline_class = getattr(import_module("diffusers"), pipeline_class_name)
@@ -96,7 +95,7 @@ def run(self):
         # Load the appropriate pipeline. We could have use `DiffusionPipeline`
         # here, but just to avoid any rough edge cases.
         pipeline = pipeline_class.from_pretrained(
-            self.ckpt_id, torch_dtype=torch.float16 if self.fp16 else torch.float32, use_auth_token=self.use_auth_token
+            self.ckpt_id, torch_dtype=torch.float16 if self.fp16 else torch.float32
         )
         pipeline.save_pretrained(
             self.local_ckpt_dir,
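For the CLI, `--use_auth_token` is kept only as a no-op that warns. A small sketch of how the new factory behaves when the flag is still passed (the checkpoint id is a placeholder):

```python
from argparse import Namespace

from diffusers.commands.fp16_safetensors import conversion_command_factory

# Mimic the parsed CLI arguments; use_auth_token=True now only triggers the deprecation
# warning, and authentication falls back to the cached login / HF_TOKEN.
args = Namespace(ckpt_id="your-namespace/your-model", fp16=True, use_safetensors=True, use_auth_token=True)
command = conversion_command_factory(args)  # warns, then builds FP16SafetensorsCommand without the flag
```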
