Labels: maeb, Audio extension
Description
Tested AudioPairClassification and AudioClassification; both fail with the same error, e.g.:
```
Found 7425/14856 duplicates in the input data. Only encoding unique sentences.
  0%|          | 0/929 [00:00<?, ?it/s]
Error while evaluating CREMADPairClassification: axes don't match array
Traceback (most recent call last):
  File "/mnt/workspace/workgroup/chx/mteb-maeb/run_models5.py", line 27, in <module>
    results = evaluation.run(model, output_folder="/mnt/workspace/workgroup/chx/mteb-maeb/results", batch_size=8)
  File "/mnt/workspace/workgroup/chx/mteb-maeb/mteb/evaluation/MTEB.py", line 672, in run
    raise e
  File "/mnt/workspace/workgroup/chx/mteb-maeb/mteb/evaluation/MTEB.py", line 625, in run
    results, tick, tock = self._run_eval(
  File "/mnt/workspace/workgroup/chx/mteb-maeb/mteb/evaluation/MTEB.py", line 307, in _run_eval
    results = task.evaluate(
  File "/mnt/workspace/workgroup/chx/mteb-maeb/mteb/abstasks/AbsTask.py", line 137, in evaluate
    scores[hf_subset] = self._evaluate_subset(
  File "/mnt/workspace/workgroup/chx/mteb-maeb/mteb/abstasks/Audio/AbsTaskAudioPairClassification.py", line 58, in _evaluate_subset
    scores = evaluator.compute_metrics(model, encode_kwargs=encode_kwargs)
  File "/mnt/workspace/workgroup/chx/mteb-maeb/mteb/evaluation/evaluators/Audio/AudioPairClassificationEvaluator.py", line 98, in compute_metrics
    embeddings = model.get_audio_embeddings(
  File "/mnt/workspace/workgroup/chx/mteb-maeb/mteb/models/whisper_models.py", line 119, in get_audio_embeddings
    inputs = self.processor.feature_extractor(
  File "/mnt/workspace/workgroup/chx/env/maeb/lib/python3.10/site-packages/transformers/models/whisper/feature_extraction_whisper.py", line 312, in __call__
    input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
ValueError: axes don't match array
```
and
```
Error while evaluating ESC50: axes don't match array
Traceback (most recent call last):
  File "/mnt/workspace/workgroup/chx/mteb-maeb/run_models5.py", line 57, in <module>
    results = evaluation.run(model, output_folder="/mnt/workspace/workgroup/chx/mteb-maeb/results", batch_size=8)
  File "/mnt/workspace/workgroup/chx/mteb-maeb/mteb/evaluation/MTEB.py", line 672, in run
    raise e
  File "/mnt/workspace/workgroup/chx/mteb-maeb/mteb/evaluation/MTEB.py", line 625, in run
    results, tick, tock = self._run_eval(
  File "/mnt/workspace/workgroup/chx/mteb-maeb/mteb/evaluation/MTEB.py", line 307, in _run_eval
    results = task.evaluate(
  File "/mnt/workspace/workgroup/chx/mteb-maeb/mteb/abstasks/Audio/AbsTaskAudioClassification.py", line 94, in evaluate
    scores[hf_subset] = self._evaluate_subset_cross_validation(
  File "/mnt/workspace/workgroup/chx/mteb-maeb/mteb/abstasks/Audio/AbsTaskAudioClassification.py", line 170, in _evaluate_subset_cross_validation
    scores_exp, test_cache = evaluator(model, test_cache=test_cache)
  File "/mnt/workspace/workgroup/chx/mteb-maeb/mteb/evaluation/evaluators/Audio/ClassificationEvaluator.py", line 83, in __call__
    X_train = model.get_audio_embeddings(
  File "/mnt/workspace/workgroup/chx/mteb-maeb/mteb/models/whisper_models.py", line 119, in get_audio_embeddings
    inputs = self.processor.feature_extractor(
  File "/mnt/workspace/workgroup/chx/env/maeb/lib/python3.10/site-packages/transformers/models/whisper/feature_extraction_whisper.py", line 312, in __call__
    input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
ValueError: axes don't match array
```
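For reference, a minimal sketch of the input format the Whisper feature extractor accepts without hitting the `transpose(2, 0, 1)` error. This is an assumption about the cause (the shape of the waveforms passed by `get_audio_embeddings`), not the mteb-maeb code itself:

```python
# Minimal sketch, assuming the failure is caused by the shape of the raw audio
# handed to the feature extractor rather than by the tasks themselves.
import numpy as np
from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base")

# A batch of 1-D, mono, float32 waveforms at 16 kHz works as expected.
batch = [np.random.randn(16000).astype(np.float32) for _ in range(4)]

features = feature_extractor(batch, sampling_rate=16000, return_tensors="np")
print(features["input_features"].shape)  # (4, 80, 3000) for whisper-base
```

If the waveforms reaching `self.processor.feature_extractor` in `whisper_models.py` still carry a channel or batch dimension (e.g. shape `(1, n_samples)` as returned by `torchaudio.load` or some dataset decoders), the padded `input_features` can end up with a number of axes that `transpose(2, 0, 1)` does not expect, which would match the `ValueError` above. Squeezing the arrays to 1-D before calling the extractor may be worth checking.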