https://github.com/SYSTRAN/faster-whisper
conda create -n whisper python=3.9 -y
conda activate whisper
pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu117
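The web service further down also imports faster_whisper and flask, so install them into the same environment. A minimal sketch, assuming the usual PyPI package names:

pip install faster-whisper flask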
import torch
print('CUDA version:', torch.version.cuda)
print('PyTorch version:', torch.__version__)
print('GPU available:', 'yes' if torch.cuda.is_available() else 'no')
print('GPU count:', torch.cuda.device_count())
print('BF16 supported:', 'yes' if torch.cuda.is_bf16_supported() else 'no')
print('Current GPU model:', torch.cuda.get_device_name())
print('CUDA compute capability:', torch.cuda.get_device_capability())
print('Total GPU memory:', torch.cuda.get_device_properties(0).total_memory / 1024 / 1024 / 1024, 'GB')
print('Tensor Cores supported:', 'yes' if torch.cuda.get_device_properties(0).major >= 7 else 'no')
print('GPU memory usage:', torch.cuda.memory_allocated(0) / torch.cuda.get_device_properties(0).total_memory * 100, '%')
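With the environment verified, a quick standalone transcription sanity check before wrapping it in a web service. A minimal sketch: audio.mp3 is a placeholder path; the model size, compute type, and beam_size mirror the service below.

from faster_whisper import WhisperModel

model = WhisperModel("large-v3", device="cuda", compute_type="float16")
# audio.mp3 is a placeholder; point this at any local mp3/wav file
segments, info = model.transcribe("audio.mp3", beam_size=5)
print("Detected language '%s' with probability %f" % (info.language, info.language_probability))
for segment in segments:
    print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))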
Web service
from flask import Flask, request, jsonify
from werkzeug.utils import secure_filename
import os
from faster_whisper import WhisperModel

app = Flask(__name__)

# Directory where uploaded files are saved
UPLOAD_FOLDER = 'uploads'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

# Allowed upload file types
ALLOWED_EXTENSIONS = {'mp3', 'wav'}

def allowed_file(filename):
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS

@app.route('/upload', methods=['POST'])
def upload_file():
    try:
        # Check whether a file was included in the request
        if 'file' not in request.files:
            return jsonify({'error': 'No file provided'}), 400
        file = request.files['file']
        # If the user did not select a file, the browser may submit an empty filename
        if file.filename == '':
            return jsonify({'error': 'No file selected'}), 400
        map_array = []
        # Check the file type
        if file and allowed_file(file.filename):
            # Save the uploaded file (secure_filename guards against path traversal)
            file_path = os.path.join(app.config['UPLOAD_FOLDER'], secure_filename(file.filename))
            file.save(file_path)
            segments, info = model.transcribe(file_path, beam_size=5)
            print("Detected language '%s' with probability %f" % (info.language, info.language_probability))
            for segment in segments:
                print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))
                map_array.append({"start": round(segment.start, 2), "end": round(segment.end, 2), "text": segment.text})
            return jsonify(map_array)
        return jsonify({'error': 'Unsupported file type'}), 400
    except Exception as e:
        return jsonify({'error': str(e)}), 500

if __name__ == '__main__':
    # Make sure the 'uploads' folder exists
    if not os.path.exists(UPLOAD_FOLDER):
        os.makedirs(UPLOAD_FOLDER)
    model_size = "large-v3"
    # Run on GPU with FP16
    model = WhisperModel(model_size, device="cuda", compute_type="float16")
    # Start the Flask app (debug=True also enables the reloader, which loads the model twice)
    app.run(host='0.0.0.0', port=5000, debug=True)
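Once the server is running, the /upload endpoint can be exercised with a multipart POST; a sketch using curl, where test.mp3 is a placeholder file name and "file" matches the form field the handler expects:

curl -F "file=@test.mp3" http://localhost:5000/upload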