-
Notifications
You must be signed in to change notification settings - Fork 179
/
whisper_speech_recognition.cpp
47 lines (38 loc) · 1.64 KB
/
whisper_speech_recognition.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
// Copyright (C) 2023-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include <filesystem>
#include <iostream>
#include <stdexcept>
#include <string>

#include "audio_utils.hpp"
#include "openvino/genai/whisper_pipeline.hpp"
// Transcribes a WAV file with an OpenVINO GenAI Whisper pipeline and prints
// the recognized text plus per-chunk timestamps.
//
// Usage: <binary> <MODEL_DIR> "<WAV_FILE_PATH>"
// Returns EXIT_SUCCESS on success, EXIT_FAILURE on any error (message on stderr).
int main(int argc, char* argv[]) try {
    if (3 > argc) {
        throw std::runtime_error(std::string{"Usage: "} + argv[0] + " <MODEL_DIR> \"<WAV_FILE_PATH>\"");
    }

    std::filesystem::path models_path = argv[1];
    std::string wav_file_path = argv[2];
    std::string device = "CPU";  // GPU, NPU can be used as well

    ov::genai::WhisperPipeline pipeline(models_path, device);

    ov::genai::WhisperGenerationConfig config = pipeline.get_generation_config();
    config.max_new_tokens = 100;  // increase this based on your speech length
    // 'task' and 'language' parameters are supported for multilingual models only
    config.language = "<|en|>";  // can switch to <|zh|> for Chinese language
    config.task = "transcribe";
    config.return_timestamps = true;

    // Pipeline expects normalized audio with Sample Rate of 16kHz
    ov::genai::RawSpeechInput raw_speech = utils::audio::read_wav(wav_file_path);
    auto result = pipeline.generate(raw_speech, config);

    std::cout << result << "\n";

    // 'chunks' is an optional; guard the dereference instead of assuming
    // return_timestamps guarantees it is populated (unchecked '*' is UB if empty).
    if (result.chunks.has_value()) {
        for (const auto& chunk : *result.chunks) {
            std::cout << "timestamps: [" << chunk.start_ts << ", " << chunk.end_ts << "] text: " << chunk.text << "\n";
        }
    }
} catch (const std::exception& error) {
    try {
        std::cerr << error.what() << '\n';
    } catch (const std::ios_base::failure&) {
    }
    return EXIT_FAILURE;
} catch (...) {
    try {
        std::cerr << "Non-exception object thrown\n";
    } catch (const std::ios_base::failure&) {
    }
    return EXIT_FAILURE;
}