From f046afaebb11adaa393a5db6a29e85098aa80688 Mon Sep 17 00:00:00 2001 From: lelo Date: Sun, 8 Jun 2025 19:25:25 +0200 Subject: [PATCH] improve tracking of local installation for transcription --- requirements_transcription.txt | 9 ++++++--- transcribe_all.py | 17 +++++++++++++++++ 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/requirements_transcription.txt b/requirements_transcription.txt index bb2ef44..3642026 100644 --- a/requirements_transcription.txt +++ b/requirements_transcription.txt @@ -1,5 +1,8 @@ +pyaml +librosa openai-whisper #https://pytorch.org/get-started/locally/ -torch==2.5.1 -torchvision==0.20.1 -torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu124 \ No newline at end of file +--extra-index-url https://download.pytorch.org/whl/cu128 +torch +torchvision +torchaudio \ No newline at end of file diff --git a/transcribe_all.py b/transcribe_all.py index e8df9af..dd01b36 100755 --- a/transcribe_all.py +++ b/transcribe_all.py @@ -19,6 +19,23 @@ with open("transcription_config.yml", "r", encoding="utf-8") as file: settings = yaml.safe_load(file) folder_list = settings.get("folder_list") model_name = settings.get("model_name") + gpu_only = settings.get("gpu_only", False) + +print("PyTorch version:", torch.__version__) +print("CUDA available?", torch.cuda.is_available()) +print("CUDA version:", torch.version.cuda) +print("GPU count:", torch.cuda.device_count()) +if torch.cuda.is_available(): + for i in range(torch.cuda.device_count()): + print(f" Device {i}:", torch.cuda.get_device_name(i)) + +if not folder_list or not model_name: + print("Error: Please check the transcription_config.yml file. It should contain 'folder_list' and 'model_name'.") + sys.exit(1) + +if gpu_only and not torch.cuda.is_available(): + print("Error: You requested to only use GPU but it is not available. Please check your PyTorch installation.") + sys.exit(1) def load_audio_librosa(path: str, sr: int = 16_000) -> np.ndarray: audio, orig_sr = librosa.load(path, sr=sr) # load + resample to 16 kHz