diff --git a/src/diffusers/pipelines/stable_audio/pipeline_stable_audio.py b/src/diffusers/pipelines/stable_audio/pipeline_stable_audio.py
index 779c4f0dd173..4fe082d88957 100644
--- a/src/diffusers/pipelines/stable_audio/pipeline_stable_audio.py
+++ b/src/diffusers/pipelines/stable_audio/pipeline_stable_audio.py
@@ -44,7 +44,7 @@
         >>> import soundfile as sf
         >>> from diffusers import StableAudioPipeline

-        >>> repo_id = "ylacombe/stable-audio-1.0"  # TODO (YL): change once set
+        >>> repo_id = "stabilityai/stable-audio-open-1.0"
         >>> pipe = StableAudioPipeline.from_pretrained(repo_id, torch_dtype=torch.float16)
         >>> pipe = pipe.to("cuda")

diff --git a/tests/models/autoencoders/test_models_vae.py b/tests/models/autoencoders/test_models_vae.py
index cff2ce63c8e3..38cbd788a95e 100644
--- a/tests/models/autoencoders/test_models_vae.py
+++ b/tests/models/autoencoders/test_models_vae.py
@@ -1182,9 +1182,7 @@ def get_audio(self, audio_sample_size=2097152, fp16=False):

         return audio

-    def get_oobleck_vae_model(
-        self, model_id="ylacombe/stable-audio-1.0", fp16=False
-    ):  # TODO (YL): change repo id once moved
+    def get_oobleck_vae_model(self, model_id="stabilityai/stable-audio-open-1.0", fp16=False):
         torch_dtype = torch.float16 if fp16 else torch.float32

         model = AutoencoderOobleck.from_pretrained(
diff --git a/tests/pipelines/stable_audio/test_stable_audio.py b/tests/pipelines/stable_audio/test_stable_audio.py
index d89bd70575c9..fe8a684de0cb 100644
--- a/tests/pipelines/stable_audio/test_stable_audio.py
+++ b/tests/pipelines/stable_audio/test_stable_audio.py
@@ -439,9 +439,7 @@ def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0
         return inputs

     def test_stable_audio(self):
-        stable_audio_pipe = StableAudioPipeline.from_pretrained(
-            "ylacombe/stable-audio-1.0"
-        )  # TODO (YL): change once changed
+        stable_audio_pipe = StableAudioPipeline.from_pretrained("stabilityai/stable-audio-open-1.0")
         stable_audio_pipe = stable_audio_pipe.to(torch_device)
         stable_audio_pipe.set_progress_bar_config(disable=None)