
Commit 9ac105e (parent: 9a01c90)

chore: use original instead of scarf urls

The scarf URLs allowed Coqui to collect download stats, which we don't need anymore.

8 files changed: +130 -134 lines
.github/workflows/tests.yml (-4)

@@ -29,10 +29,6 @@ jobs:
           sudo apt-get update
           sudo apt-get install -y --no-install-recommends git make gcc
           make system-deps
-      - name: Replace scarf urls
-        if: contains(fromJSON('["data_tests", "inference_tests", "test_aux", "test_tts", "test_tts2", "test_xtts", "test_zoo0", "test_zoo1", "test_zoo2"]'), matrix.subset)
-        run: |
-          sed -i 's/https:\/\/coqui.gateway.scarf.sh\//https:\/\/github.com\/coqui-ai\/TTS\/releases\/download\//g' TTS/.models.json
       - name: Unit tests
         run: |
           resolution=highest
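For reference, the removed CI step rewrote TTS/.models.json at test time. Below is a rough Python equivalent of that sed one-liner, shown only to document the mapping that is now baked into the committed catalog; the path assumes the script runs from the repository root.

old_prefix = "https://coqui.gateway.scarf.sh/"
new_prefix = "https://github.com/coqui-ai/TTS/releases/download/"

with open("TTS/.models.json", encoding="utf-8") as f:
    text = f.read()

# Same substitution the sed command performed, applied in place.
with open("TTS/.models.json", "w", encoding="utf-8") as f:
    f.write(text.replace(old_prefix, new_prefix))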

TTS/.models.json (+107 -107)

Large diff not rendered by default: the scarf gateway download URLs in the model catalog are replaced with the original hosting locations.
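A quick sanity check (not part of the commit) that the committed catalog no longer references the scarf gateway; it assumes execution from the repository root.

import json

with open("TTS/.models.json", encoding="utf-8") as f:
    catalog = json.dumps(json.load(f))  # flatten to one searchable string

# Fail loudly if any scarf gateway URL survived the replacement.
assert "coqui.gateway.scarf.sh" not in catalog
print("TTS/.models.json is free of scarf URLs")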

TTS/demos/xtts_ft_demo/utils/gpt_train.py (+5 -5)

@@ -43,8 +43,8 @@ def train_gpt(language, num_epochs, batch_size, grad_acumm, train_csv, eval_csv,
     os.makedirs(CHECKPOINTS_OUT_PATH, exist_ok=True)

     # DVAE files
-    DVAE_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/dvae.pth"
-    MEL_NORM_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/mel_stats.pth"
+    DVAE_CHECKPOINT_LINK = "https://huggingface.co/coqui/XTTS-v2/resolve/main/dvae.pth"
+    MEL_NORM_LINK = "https://huggingface.co/coqui/XTTS-v2/resolve/main/mel_stats.pth"

     # Set the path to the downloaded files
     DVAE_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(DVAE_CHECKPOINT_LINK))
@@ -58,9 +58,9 @@ def train_gpt(language, num_epochs, batch_size, grad_acumm, train_csv, eval_csv,
     )

     # Download XTTS v2.0 checkpoint if needed
-    TOKENIZER_FILE_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/vocab.json"
-    XTTS_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/model.pth"
-    XTTS_CONFIG_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/config.json"
+    TOKENIZER_FILE_LINK = "https://huggingface.co/coqui/XTTS-v2/resolve/main/vocab.json"
+    XTTS_CHECKPOINT_LINK = "https://huggingface.co/coqui/XTTS-v2/resolve/main/model.pth"
+    XTTS_CONFIG_LINK = "https://huggingface.co/coqui/XTTS-v2/resolve/main/config.json"

     # XTTS transfer learning parameters: You we need to provide the paths of XTTS model checkpoint that you want to do the fine tuning.
     TOKENIZER_FILE = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(TOKENIZER_FILE_LINK))  # vocab.json file
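The demo's constants now point at Hugging Face "resolve" URLs, which serve the raw files directly. A minimal download sketch follows; it is not the demo's own code, and CHECKPOINTS_OUT_PATH here is a placeholder for whatever output directory the caller configures.

import os
import urllib.request

CHECKPOINTS_OUT_PATH = "run/xtts_checkpoints"  # placeholder path for this example
DVAE_CHECKPOINT_LINK = "https://huggingface.co/coqui/XTTS-v2/resolve/main/dvae.pth"
MEL_NORM_LINK = "https://huggingface.co/coqui/XTTS-v2/resolve/main/mel_stats.pth"

os.makedirs(CHECKPOINTS_OUT_PATH, exist_ok=True)
for url in (DVAE_CHECKPOINT_LINK, MEL_NORM_LINK):
    target = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(url))
    if not os.path.isfile(target):  # skip files that are already present
        urllib.request.urlretrieve(url, target)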

TTS/tts/layers/tortoise/arch_utils.py (+1 -1)

@@ -293,7 +293,7 @@ def forward(self, x):
         return h[:, :, 0]


-DEFAULT_MEL_NORM_FILE = "https://coqui.gateway.scarf.sh/v0.14.1_models/mel_norms.pth"
+DEFAULT_MEL_NORM_FILE = "https://github.com/coqui-ai/TTS/releases/download/v0.14.1_models/mel_norms.pth"


 class TorchMelSpectrogram(nn.Module):
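DEFAULT_MEL_NORM_FILE now points straight at the GitHub release asset. A hedged sketch of fetching and inspecting it; the local filename is this example's choice, not something the module defines.

import torch

url = "https://github.com/coqui-ai/TTS/releases/download/v0.14.1_models/mel_norms.pth"
torch.hub.download_url_to_file(url, "mel_norms.pth")  # download the release asset
mel_norms = torch.load("mel_norms.pth", map_location="cpu")
print(type(mel_norms), getattr(mel_norms, "shape", None))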

TTS/tts/layers/xtts/trainer/gpt_trainer.py (+1 -1)

@@ -50,7 +50,7 @@ class GPTArgs(XttsArgs):
     max_wav_length: int = 255995  # ~11.6 seconds
     max_text_length: int = 200
     tokenizer_file: str = ""
-    mel_norm_file: str = "https://coqui.gateway.scarf.sh/v0.14.0_models/mel_norms.pth"
+    mel_norm_file: str = "https://github.com/coqui-ai/TTS/releases/download/v0.14.0_models/mel_norms.pth"
     dvae_checkpoint: str = ""
     xtts_checkpoint: str = ""
     gpt_checkpoint: str = ""  # if defined it will replace the gpt weights on xtts model
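The GPTArgs default changes, but it remains an ordinary field that callers can override per run. A hedged sketch, assuming GPTArgs accepts keyword overrides the way the XTTS training recipes use it:

from TTS.tts.layers.xtts.trainer.gpt_trainer import GPTArgs

model_args = GPTArgs(
    max_wav_length=255995,  # ~11.6 seconds
    max_text_length=200,
    # Override the new GitHub-releases default if the file is mirrored elsewhere:
    mel_norm_file="https://github.com/coqui-ai/TTS/releases/download/v0.14.0_models/mel_norms.pth",
)
print(model_args.mel_norm_file)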

TTS/utils/manage.py (+8 -8)

@@ -230,7 +230,7 @@ def _download_hf_model(self, model_item: Dict, output_path: str):
         self._download_zip_file(model_item["hf_url"], output_path, self.progress_bar)

     def download_fairseq_model(self, model_name, output_path):
-        URI_PREFIX = "https://coqui.gateway.scarf.sh/fairseq/"
+        URI_PREFIX = "https://dl.fbaipublicfiles.com/mms/tts/"
         _, lang, _, _ = model_name.split("/")
         model_download_uri = os.path.join(URI_PREFIX, f"{lang}.tar.gz")
         self._download_tar_file(model_download_uri, output_path, self.progress_bar)
@@ -243,9 +243,9 @@ def set_model_url(model_item: Dict):
         elif "hf_url" in model_item:
             model_item["model_url"] = model_item["hf_url"]
         elif "fairseq" in model_item["model_name"]:
-            model_item["model_url"] = "https://coqui.gateway.scarf.sh/fairseq/"
+            model_item["model_url"] = "https://dl.fbaipublicfiles.com/mms/tts/"
         elif "xtts" in model_item["model_name"]:
-            model_item["model_url"] = "https://coqui.gateway.scarf.sh/xtts/"
+            model_item["model_url"] = "https://huggingface.co/coqui/"
         return model_item

     def _set_model_item(self, model_name):
@@ -278,11 +278,11 @@ def _set_model_item(self, model_name):
                 "contact": "[email protected]",
                 "tos_required": True,
                 "hf_url": [
-                    f"https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/{model_version}/model.pth",
-                    f"https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/{model_version}/config.json",
-                    f"https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/{model_version}/vocab.json",
-                    f"https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/{model_version}/hash.md5",
-                    f"https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/{model_version}/speakers_xtts.pth",
+                    f"https://huggingface.co/coqui/XTTS-v2/resolve/{model_version}/model.pth",
+                    f"https://huggingface.co/coqui/XTTS-v2/resolve/{model_version}/config.json",
+                    f"https://huggingface.co/coqui/XTTS-v2/resolve/{model_version}/vocab.json",
+                    f"https://huggingface.co/coqui/XTTS-v2/resolve/{model_version}/hash.md5",
+                    f"https://huggingface.co/coqui/XTTS-v2/resolve/{model_version}/speakers_xtts.pth",
                 ],
             }
         else:
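To illustrate the fairseq path above: a model name now resolves against Meta's public MMS hosting instead of the scarf gateway. A small standalone sketch of the same URI construction; the model name is only an illustrative example, and the real method joins the pieces with os.path.join.

URI_PREFIX = "https://dl.fbaipublicfiles.com/mms/tts/"

model_name = "tts_models/deu/fairseq/vits"  # example name only
_, lang, _, _ = model_name.split("/")
model_download_uri = URI_PREFIX + f"{lang}.tar.gz"
print(model_download_uri)  # https://dl.fbaipublicfiles.com/mms/tts/deu.tar.gz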

recipes/ljspeech/xtts_v1/train_gpt_xtts.py (+4 -4)

@@ -41,8 +41,8 @@


 # DVAE files
-DVAE_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.2/dvae.pth"
-MEL_NORM_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.2/mel_stats.pth"
+DVAE_CHECKPOINT_LINK = "https://huggingface.co/coqui/XTTS-v1/resolve/v1.1.2/dvae.pth"
+MEL_NORM_LINK = "https://huggingface.co/coqui/XTTS-v1/resolve/v1.1.2/mel_stats.pth"

 # Set the path to the downloaded files
 DVAE_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, DVAE_CHECKPOINT_LINK.split("/")[-1])
@@ -55,8 +55,8 @@


 # Download XTTS v1.1 checkpoint if needed
-TOKENIZER_FILE_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.2/vocab.json"
-XTTS_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.2/model.pth"
+TOKENIZER_FILE_LINK = "https://huggingface.co/coqui/XTTS-v1/resolve/v1.1.2/vocab.json"
+XTTS_CHECKPOINT_LINK = "https://huggingface.co/coqui/XTTS-v1/resolve/v1.1.2/model.pth"

 # XTTS transfer learning parameters: You we need to provide the paths of XTTS model checkpoint that you want to do the fine tuning.
 TOKENIZER_FILE = os.path.join(CHECKPOINTS_OUT_PATH, TOKENIZER_FILE_LINK.split("/")[-1])  # vocab.json file
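Unlike the v2 recipe below, these v1 links pin a specific revision (v1.1.2) in the Hugging Face resolve path. A hedged sketch of building such revision-pinned URLs, using only the repository and file names that appear in the diff above:

HF_REPO = "https://huggingface.co/coqui/XTTS-v1"
REVISION = "v1.1.2"  # pinned tag/revision, as in the recipe

links = {
    name: f"{HF_REPO}/resolve/{REVISION}/{name}"
    for name in ("dvae.pth", "mel_stats.pth", "vocab.json", "model.pth")
}
print(links["model.pth"])  # .../XTTS-v1/resolve/v1.1.2/model.pth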

recipes/ljspeech/xtts_v2/train_gpt_xtts.py (+4 -4)

@@ -41,8 +41,8 @@


 # DVAE files
-DVAE_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/dvae.pth"
-MEL_NORM_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/mel_stats.pth"
+DVAE_CHECKPOINT_LINK = "https://huggingface.co/coqui/XTTS-v2/resolve/main/dvae.pth"
+MEL_NORM_LINK = "https://huggingface.co/coqui/XTTS-v2/resolve/main/mel_stats.pth"

 # Set the path to the downloaded files
 DVAE_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(DVAE_CHECKPOINT_LINK))
@@ -55,8 +55,8 @@


 # Download XTTS v2.0 checkpoint if needed
-TOKENIZER_FILE_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/vocab.json"
-XTTS_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/model.pth"
+TOKENIZER_FILE_LINK = "https://huggingface.co/coqui/XTTS-v2/resolve/main/vocab.json"
+XTTS_CHECKPOINT_LINK = "https://huggingface.co/coqui/XTTS-v2/resolve/main/model.pth"

 # XTTS transfer learning parameters: You we need to provide the paths of XTTS model checkpoint that you want to do the fine tuning.
 TOKENIZER_FILE = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(TOKENIZER_FILE_LINK))  # vocab.json file
