Compare commits

...

2 Commits

2 changed files with 23 additions and 3 deletions

View File

@@ -1,5 +1,8 @@
 pyaml
 librosa
+openai-whisper
+#https://pytorch.org/get-started/locally/
-torch==2.5.1
-torchvision==0.20.1
-torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu124
+--extra-index-url https://download.pytorch.org/whl/cu128
+torch
+torchvision
+torchaudio
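
For reference, a minimal install sketch for the updated dependency file; the name requirements.txt is an assumption, since the compare view does not show file paths. pip honors the --extra-index-url line inside the file, so the CUDA 12.8 wheels can be resolved from the PyTorch index alongside regular PyPI packages:

# assumption: the dependency file shown above is saved as requirements.txt
pip install -r requirements.txt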

View File

@@ -19,6 +19,23 @@ with open("transcription_config.yml", "r", encoding="utf-8") as file:
    settings = yaml.safe_load(file)
folder_list = settings.get("folder_list")
model_name = settings.get("model_name")
gpu_only = settings.get("gpu_only", False)
print("PyTorch version:", torch.__version__)
print("CUDA available?", torch.cuda.is_available())
print("CUDA version:", torch.version.cuda)
print("GPU count:", torch.cuda.device_count())
if torch.cuda.is_available():
    for i in range(torch.cuda.device_count()):
        print(f" Device {i}:", torch.cuda.get_device_name(i))
if not folder_list or not model_name:
    print("Error: Please check the transcription_config.yml file. It should contain 'folder_list' and 'model_name'.")
    sys.exit(1)
if gpu_only and not torch.cuda.is_available():
    print("Error: You requested to only use GPU but it is not available. Please check your PyTorch installation.")
    sys.exit(1)
def load_audio_librosa(path: str, sr: int = 16_000) -> np.ndarray:
    audio, orig_sr = librosa.load(path, sr=sr)  # load + resample to 16 kHz
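
For context, a minimal sketch of the transcription_config.yml that this block reads; the key names (folder_list, model_name, gpu_only) come from the script above, while the values are placeholders and treating folder_list as a YAML list is an assumption:

# placeholder values; folder_list and model_name are required by the script,
# gpu_only defaults to False when omitted
folder_list:
  - /path/to/audio/folder
model_name: base
gpu_only: true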