Global:
  model: whisper_large
  mode: predict # only predict is supported
  device: gpu:0
  output: "output"

Predict:
  batch_size: 1
  model_dir: "whisper_large"
  input: "https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav"
  kernel_option:
    run_mode: paddle
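For reference, the same prediction can also be run programmatically. The sketch below is a minimal, non-authoritative example assuming the PaddleX Python API (create_model) is available; the model name, input URL, and batch size mirror the config above, while the result-handling calls and the output file name are illustrative assumptions.

# Minimal sketch: programmatic counterpart of the whisper_large predict config above.
# Assumes the PaddleX Python package and its create_model API are installed.
from paddlex import create_model

# Mirrors Global.model / Predict.model_dir from the config.
model = create_model("whisper_large")

# Mirrors Predict.input and Predict.batch_size; the URL is the same sample clip
# referenced in the config.
output = model.predict(
    "https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav",
    batch_size=1,
)

# Print each recognition result and save it under "output" (mirroring Global.output);
# the exact result methods are assumptions based on common PaddleX result objects.
for res in output:
    res.print()
    res.save_to_json("output/result.json")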