diff --git a/MeetingAssistant.pro b/MeetingAssistant.pro index 0c7139f..988d692 100644 --- a/MeetingAssistant.pro +++ b/MeetingAssistant.pro @@ -9,6 +9,10 @@ win32 { LIBS += -lole32 -loleaut32 -lmmdevapi } +# 添加调试信息 +QMAKE_CXXFLAGS_RELEASE += /Zi +QMAKE_LFLAGS_RELEASE += /DEBUG /OPT:REF /OPT:ICF + # You can make your code fail to compile if it uses deprecated APIs. # In order to do so, uncomment the following line. #DEFINES += QT_DISABLE_DEPRECATED_BEFORE=0x060000 # disables all the APIs deprecated before Qt 6.0.0 @@ -63,4 +67,7 @@ win32 { $$quote(cmd /c copy /Y \"$$PWD\\third_party\\azure_speech_sdk\\bin\\Microsoft.CognitiveServices.Speech.extension.kws.dll\" \"$$OUT_PWD\\release\\Microsoft.CognitiveServices.Speech.extension.kws.dll\") && \ $$quote(cmd /c copy /Y \"$$PWD\\third_party\\azure_speech_sdk\\bin\\Microsoft.CognitiveServices.Speech.extension.kws.ort.dll\" \"$$OUT_PWD\\release\\Microsoft.CognitiveServices.Speech.extension.kws.ort.dll\") && \ $$quote(cmd /c copy /Y \"$$PWD\\third_party\\azure_speech_sdk\\bin\\Microsoft.CognitiveServices.Speech.extension.lu.dll\" \"$$OUT_PWD\\release\\Microsoft.CognitiveServices.Speech.extension.lu.dll\") -} \ No newline at end of file +} + +# 添加 Windows 调试帮助库 +LIBS += -ldbghelp \ No newline at end of file diff --git a/src/azurespeechapi.cpp b/src/azurespeechapi.cpp index bb137a0..32255b1 100644 --- a/src/azurespeechapi.cpp +++ b/src/azurespeechapi.cpp @@ -8,17 +8,22 @@ using namespace Microsoft::CognitiveServices::Speech::Audio; AzureSpeechAPI::AzureSpeechAPI(QObject *parent) : QObject(parent) , isInitialized(false) + , logger(std::make_unique<Logger>()) { + LOG_INFO("AzureSpeechAPI 初始化"); } AzureSpeechAPI::~AzureSpeechAPI() { + LOG_INFO("AzureSpeechAPI 析构"); stopRecognitionAndTranslation(); } void AzureSpeechAPI::initialize(const QString &subscriptionKey, const QString &region) { try { + LOG_INFO(QString("开始初始化 Azure Speech 服务,区域: %1").arg(region)); + // 创建语音配置 speechConfig =
SpeechConfig::FromSubscription(subscriptionKey.toStdString(), region.toStdString()); @@ -28,62 +33,121 @@ void AzureSpeechAPI::initialize(const QString &subscriptionKey, const QString &r speechConfig->SetProperty(PropertyId::SpeechServiceConnection_EndSilenceTimeoutMs, "1000"); isInitialized = true; + LOG_INFO("Azure Speech 服务初始化成功"); emit statusChanged("Azure Speech服务初始化成功"); } catch (const std::exception& e) { - emit error(QString("初始化失败: %1").arg(e.what())); + QString errorMsg = QString("初始化失败: %1").arg(e.what()); + LOG_ERROR(errorMsg); + emit error(errorMsg); } } void AzureSpeechAPI::startRecognitionAndTranslation(const QString &sourceLanguage, const QString &targetLanguage) { if (!isInitialized) { + LOG_ERROR("请先初始化Azure Speech服务"); emit error("请先初始化Azure Speech服务"); return; } try { + LOG_INFO(QString("开始语音识别和翻译,源语言: %1, 目标语言: %2") + .arg(sourceLanguage) + .arg(targetLanguage)); + currentSourceLanguage = sourceLanguage; currentTargetLanguage = targetLanguage; // 创建翻译配置 translationConfig = SpeechTranslationConfig::FromSubscription(speechConfig->GetSubscriptionKey(), speechConfig->GetRegion()); + if (!translationConfig) { + LOG_ERROR("创建翻译配置失败"); + emit error("创建翻译配置失败"); + return; + } + translationConfig->SetSpeechRecognitionLanguage(sourceLanguage.toStdString()); translationConfig->AddTargetLanguage(targetLanguage.toStdString()); // 创建音频流 audioStream = PushAudioInputStream::Create(); + if (!audioStream) { + LOG_ERROR("创建音频流失败"); + emit error("创建音频流失败"); + return; + } // 创建音频配置 auto audioConfig = AudioConfig::FromStreamInput(audioStream); + if (!audioConfig) { + LOG_ERROR("创建音频配置失败"); + emit error("创建音频配置失败"); + return; + } // 创建识别器 recognizer = TranslationRecognizer::FromConfig(translationConfig, audioConfig); + if (!recognizer) { + LOG_ERROR("创建识别器失败"); + emit error("创建识别器失败"); + return; + } // 设置事件处理 recognizer->Recognized.Connect([this](const TranslationRecognitionEventArgs& e) { - if (e.Result->Reason == ResultReason::TranslatedSpeech) { - emit 
recognitionResult(QString::fromStdString(e.Result->Text)); - - // 获取翻译结果 - auto translations = e.Result->Translations; - if (translations.find(currentTargetLanguage.toStdString()) != translations.end()) { - QString translatedText = QString::fromStdString( - translations[currentTargetLanguage.toStdString()]); - emit translationResult(translatedText); + try { + if (e.Result->Reason == ResultReason::TranslatedSpeech) { + QString text = QString::fromStdString(e.Result->Text); + LOG_INFO(QString("识别结果: %1").arg(text)); + emit recognitionResult(text); + + // 获取翻译结果 + auto translations = e.Result->Translations; + if (translations.find(currentTargetLanguage.toStdString()) != translations.end()) { + QString translatedText = QString::fromStdString( + translations[currentTargetLanguage.toStdString()]); + LOG_INFO(QString("翻译结果: %1").arg(translatedText)); + emit translationResult(translatedText); + } else { + LOG_ERROR(QString("未找到目标语言 %1 的翻译结果").arg(currentTargetLanguage)); + } + } else if (e.Result->Reason == ResultReason::NoMatch) { + LOG_INFO("未检测到语音"); + } else if (e.Result->Reason == ResultReason::Canceled) { + LOG_ERROR("识别被取消"); } + } catch (const std::exception& ex) { + LOG_ERROR(QString("处理识别结果时发生异常: %1").arg(ex.what())); + emit error(QString("处理识别结果时发生异常: %1").arg(ex.what())); } }); recognizer->Canceled.Connect([this](const TranslationRecognitionCanceledEventArgs& e) { + LOG_ERROR(QString("识别取消: %1").arg(QString::fromStdString(e.ErrorDetails))); emit error(QString("识别取消: %1").arg(QString::fromStdString(e.ErrorDetails))); }); + recognizer->SessionStarted.Connect([this](const SessionEventArgs&) { + LOG_INFO("识别会话开始"); + }); + + recognizer->SessionStopped.Connect([this](const SessionEventArgs&) { + LOG_INFO("识别会话结束"); + }); + // 开始连续识别 - recognizer->StartContinuousRecognitionAsync(); - emit statusChanged("开始语音识别和翻译"); + try { + recognizer->StartContinuousRecognitionAsync().wait(); + LOG_INFO("开始语音识别和翻译"); + emit statusChanged("开始语音识别和翻译"); + } catch (const 
std::exception& e) { + LOG_ERROR(QString("启动连续识别失败: %1").arg(e.what())); + emit error(QString("启动连续识别失败: %1").arg(e.what())); + } } catch (const std::exception& e) { + LOG_ERROR(QString("启动识别失败: %1").arg(e.what())); emit error(QString("启动识别失败: %1").arg(e.what())); } } @@ -92,13 +156,16 @@ void AzureSpeechAPI::stopRecognitionAndTranslation() { if (recognizer) { try { - recognizer->StopContinuousRecognitionAsync(); + LOG_INFO("停止语音识别和翻译"); + recognizer->StopContinuousRecognitionAsync().wait(); recognizer.reset(); audioStream.reset(); emit statusChanged("停止语音识别和翻译"); } catch (const std::exception& e) { - emit error(QString("停止识别失败: %1").arg(e.what())); + QString errorMsg = QString("停止识别失败: %1").arg(e.what()); + LOG_ERROR(errorMsg); + emit error(errorMsg); } } } @@ -106,6 +173,7 @@ void AzureSpeechAPI::stopRecognitionAndTranslation() void AzureSpeechAPI::processAudioData(const QByteArray &audioData) { if (!audioStream) { + LOG_ERROR("音频流未初始化"); emit error("音频流未初始化"); return; } @@ -114,10 +182,105 @@ void AzureSpeechAPI::processAudioData(const QByteArray &audioData) // 将QByteArray转换为std::vector std::vector<uint8_t> audioBuffer(audioData.begin(), audioData.end()); + // 写入音频数据,确保大小不超过uint32_t的最大值 + if (audioBuffer.size() > UINT32_MAX) { + LOG_ERROR("音频数据块太大"); + emit error("音频数据块太大"); + return; + } + + // 使用静态计数器来减少日志输出频率 + static int logCounter = 0; + if (++logCounter % 10 == 0) { // 每10个数据块记录一次 + LOG_INFO(QString("处理音频数据,大小: %1 字节").arg(audioBuffer.size())); + } + // 写入音频数据 - audioStream->Write(audioBuffer.data(), audioBuffer.size()); + try { + audioStream->Write(audioBuffer.data(), static_cast<uint32_t>(audioBuffer.size())); + if (logCounter % 10 == 0) { + LOG_INFO("音频数据写入成功"); + } + } catch (const std::exception& e) { + LOG_ERROR(QString("写入音频数据失败: %1").arg(e.what())); + emit error(QString("写入音频数据失败: %1").arg(e.what())); + } } catch (const std::exception& e) { + LOG_ERROR(QString("处理音频数据失败: %1").arg(e.what())); emit error(QString("处理音频数据失败: %1").arg(e.what())); } } + +void 
AzureSpeechAPI::testConnection(const QString &key, const QString &region) +{ + try { + LOG_INFO(QString("开始测试连接,区域: %1").arg(region)); + + // 1. 构造配置 + auto config = SpeechConfig::FromSubscription( + key.toStdString(), + region.toStdString() + ); + + if (!config) { + QString errorMsg = "Failed to create speech config"; + LOG_ERROR(errorMsg); + emit error(errorMsg); + return; + } + LOG_INFO("Speech config created successfully"); + + // 创建音频流 + auto audioStream = PushAudioInputStream::Create(); + if (!audioStream) { + QString errorMsg = "Failed to create audio stream"; + LOG_ERROR(errorMsg); + emit error(errorMsg); + return; + } + LOG_INFO("Audio stream created successfully"); + + // 创建音频配置 + auto audioConfig = AudioConfig::FromStreamInput(audioStream); + if (!audioConfig) { + QString errorMsg = "Failed to create audio config"; + LOG_ERROR(errorMsg); + emit error(errorMsg); + return; + } + + // 生成100ms的静音数据 + std::vector<uint8_t> silenceData(16000 * 2 * 0.1); // 16kHz, 16-bit, 100ms + audioStream->Write(silenceData.data(), static_cast<uint32_t>(silenceData.size())); + audioStream->Close(); + LOG_INFO("Silence data written to stream"); + + // 创建识别器 + auto recognizer = SpeechRecognizer::FromConfig(config, audioConfig); + if (!recognizer) { + QString errorMsg = "Failed to create speech recognizer"; + LOG_ERROR(errorMsg); + emit error(errorMsg); + return; + } + LOG_INFO("Speech recognizer created successfully"); + + // 进行识别 + auto result = recognizer->RecognizeOnceAsync().get(); + if (result->Reason == ResultReason::RecognizedSpeech) { + LOG_INFO("Connection test successful"); + emit statusChanged("连接测试成功"); + } else { + QString errorMsg = QString("Connection test failed: %1").arg(static_cast<int>(result->Reason)); + LOG_ERROR(errorMsg); + emit error(errorMsg); + } + } + catch (const std::exception &e) { + QString msg = QString("连接测试异常: %1").arg(e.what()); + LOG_ERROR(msg); + emit statusChanged(msg); + emit error(msg); + } } \ No newline at end of file diff --git a/src/azurespeechapi.h 
b/src/azurespeechapi.h index 4776465..f1c56e0 100644 --- a/src/azurespeechapi.h +++ b/src/azurespeechapi.h @@ -7,6 +7,11 @@ #include #include #include +#include "logger.h" + +using namespace Microsoft::CognitiveServices::Speech; +using namespace Microsoft::CognitiveServices::Speech::Translation; +using namespace Microsoft::CognitiveServices::Speech::Audio; class AzureSpeechAPI : public QObject { @@ -28,6 +33,8 @@ class AzureSpeechAPI : public QObject // 处理音频数据 void processAudioData(const QByteArray &audioData); + void testConnection(const QString &key, const QString &region); + signals: void recognitionResult(const QString &text); void translationResult(const QString &text); @@ -35,14 +42,15 @@ class AzureSpeechAPI : public QObject void statusChanged(const QString &status); private: - std::shared_ptr<Microsoft::CognitiveServices::Speech::SpeechConfig> speechConfig; - std::shared_ptr<Microsoft::CognitiveServices::Speech::Translation::SpeechTranslationConfig> translationConfig; - std::shared_ptr<Microsoft::CognitiveServices::Speech::Translation::TranslationRecognizer> recognizer; - std::shared_ptr<Microsoft::CognitiveServices::Speech::Audio::PushAudioInputStream> audioStream; + std::shared_ptr<SpeechConfig> speechConfig; + std::shared_ptr<SpeechTranslationConfig> translationConfig; + std::shared_ptr<TranslationRecognizer> recognizer; + std::shared_ptr<PushAudioInputStream> audioStream; bool isInitialized; QString currentSourceLanguage; QString currentTargetLanguage; + std::unique_ptr<Logger> logger; }; #endif // AZURESPEECHAPI_H \ No newline at end of file diff --git a/src/logger.cpp b/src/logger.cpp index 6e7db8c..5be897e 100644 --- a/src/logger.cpp +++ b/src/logger.cpp @@ -1,43 +1,82 @@ #include "logger.h" +#include <QDir> +#include <QFileInfo> +#include <QThread> +#include <QDebug> +#include <QDateTime> +#include <QCoreApplication> -Logger& Logger::instance() -{ - static Logger instance; - return instance; -} +QFile Logger::logFile; +QTextStream Logger::logStream; +bool Logger::isInitialized = false; -Logger::Logger(QObject *parent) : QObject(parent) +Logger::Logger(QObject *parent) + : QObject(parent) { - QString logPath = getLogPath(); - logFile.setFileName(logPath); - if (logFile.open(QIODevice::WriteOnly | QIODevice::Append | QIODevice::Text)) { - logStream.setDevice(&logFile); + if (!isInitialized) { + QString logPath = getLogPath(); + QDir().mkpath(QFileInfo(logPath).path()); + 
logFile.setFileName(logPath); + if (logFile.open(QIODevice::WriteOnly | QIODevice::Append | QIODevice::Text)) { + logStream.setDevice(&logFile); + isInitialized = true; + } } } Logger::~Logger() { if (logFile.isOpen()) { + logStream.flush(); logFile.close(); } } -void Logger::log(const QString& message, const QString& level) +QString Logger::getLogPath() +{ + return QCoreApplication::applicationDirPath() + "/logs/meeting_assistant.log"; +} + +QString Logger::formatLogMessage(const QString &message, const char* file, int line) { - QMutexLocker locker(&mutex); + QString timestamp = QDateTime::currentDateTime().toString("yyyy-MM-dd hh:mm:ss.zzz"); + QString threadId = QString::number((quintptr)QThread::currentThreadId()); + QString fileName = file ? QFileInfo(file).fileName() : "unknown"; + QString lineNumber = line > 0 ? QString::number(line) : "0"; - if (logFile.isOpen()) { - QString timestamp = QDateTime::currentDateTime().toString("yyyy-MM-dd hh:mm:ss.zzz"); - logStream << QString("[%1] [%2] %3\n") - .arg(timestamp) - .arg(level) - .arg(message); - logStream.flush(); + return QString("[%1][Thread-%2][%3:%4] %5") + .arg(timestamp) + .arg(threadId) + .arg(fileName) + .arg(lineNumber) + .arg(message); +} + +void Logger::log(const QString &message, const char* file, int line) +{ + if (!isInitialized) { + return; } + + QString formattedMessage = formatLogMessage(message, file, line); + logStream << formattedMessage << Qt::endl; + logStream.flush(); + + // 同时输出到控制台 + qDebug().noquote() << formattedMessage; } -QString Logger::getLogPath() const +void Logger::logError(const QString &message, const char* file, int line) { - QString exePath = QCoreApplication::applicationDirPath(); - return exePath + "/MeetingAssistant.log"; + if (!isInitialized) { + return; + } + + QString formattedMessage = formatLogMessage("ERROR: " + message, file, line); + logStream << formattedMessage << Qt::endl; + logStream.flush(); + + // 同时输出到控制台 + qDebug().noquote() << formattedMessage; } \ No 
newline at end of file diff --git a/src/logger.h b/src/logger.h index bf06dea..3bee6b6 100644 --- a/src/logger.h +++ b/src/logger.h @@ -5,26 +5,29 @@ #include #include #include -#include -#include +#include +#include class Logger : public QObject { Q_OBJECT public: - static Logger& instance(); - void log(const QString& message, const QString& level = "INFO"); - -private: explicit Logger(QObject *parent = nullptr); ~Logger(); - Logger(const Logger&) = delete; - Logger& operator=(const Logger&) = delete; - QFile logFile; - QTextStream logStream; - QMutex mutex; - QString getLogPath() const; + static void log(const QString &message, const char* file = nullptr, int line = 0); + static void logError(const QString &message, const char* file = nullptr, int line = 0); + static QString getLogPath(); + +private: + static QString formatLogMessage(const QString &message, const char* file = nullptr, int line = 0); + static QFile logFile; + static QTextStream logStream; + static bool isInitialized; }; +// 定义日志宏 +#define LOG_INFO(msg) Logger::log(msg, __FILE__, __LINE__) +#define LOG_ERROR(msg) Logger::logError(msg, __FILE__, __LINE__) + #endif // LOGGER_H \ No newline at end of file diff --git a/src/main.cpp b/src/main.cpp index f44c022..60ca3ae 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -1,12 +1,91 @@ #include #include +#include +#include +#include +#include +#include #include "mainwindow.h" +#include "logger.h" + +// 设置崩溃转储文件的保存路径 +QString getDumpFilePath() { + QString dumpDir = QCoreApplication::applicationDirPath() + "/dumps"; + QDir().mkpath(dumpDir); + return dumpDir + "/crash_" + QDateTime::currentDateTime().toString("yyyyMMdd_hhmmss") + ".dmp"; +} + +// 崩溃处理函数 +LONG WINAPI TopLevelExceptionHandler(EXCEPTION_POINTERS* pExceptionInfo) { + static bool isHandling = false; + if (isHandling) { + return EXCEPTION_CONTINUE_SEARCH; + } + isHandling = true; + + try { + QString dumpPath = getDumpFilePath(); + HANDLE hFile = CreateFileW( + dumpPath.toStdWString().c_str(), + 
GENERIC_WRITE, + 0, + NULL, + CREATE_ALWAYS, + FILE_ATTRIBUTE_NORMAL, + NULL + ); + + if (hFile != INVALID_HANDLE_VALUE) { + MINIDUMP_EXCEPTION_INFORMATION exInfo; + exInfo.ExceptionPointers = pExceptionInfo; + exInfo.ThreadId = GetCurrentThreadId(); + exInfo.ClientPointers = TRUE; + + // 创建完整的内存转储 + MiniDumpWriteDump( + GetCurrentProcess(), + GetCurrentProcessId(), + hFile, + static_cast<MINIDUMP_TYPE>(MiniDumpNormal | MiniDumpWithFullMemory | MiniDumpWithHandleData), + &exInfo, + NULL, + NULL + ); + + CloseHandle(hFile); + + // 记录崩溃信息到日志 + Logger logger; + logger.logError(QString("程序崩溃,转储文件已保存到: %1").arg(dumpPath)); + logger.logError(QString("异常代码: 0x%1").arg(pExceptionInfo->ExceptionRecord->ExceptionCode, 8, 16, QChar('0'))); + logger.logError(QString("异常地址: 0x%1").arg((quintptr)pExceptionInfo->ExceptionRecord->ExceptionAddress, 8, 16, QChar('0'))); + } + } + catch (...) { + // 如果转储过程中发生异常,至少记录一下 + Logger logger; + logger.logError("程序崩溃,但无法创建转储文件"); + } + + return EXCEPTION_CONTINUE_SEARCH; +} int main(int argc, char *argv[]) { + // 设置异常处理 + SetUnhandledExceptionFilter(TopLevelExceptionHandler); + QApplication app(argc, argv); app.setQuitOnLastWindowClosed(true); + // 设置应用程序信息 + QCoreApplication::setOrganizationName("MeetingAssistant"); + QCoreApplication::setApplicationName("MeetingAssistant"); + + // 创建日志目录 + QString logDir = QCoreApplication::applicationDirPath() + "/logs"; + QDir().mkpath(logDir); + MainWindow window; window.show(); diff --git a/src/mainwindow.cpp b/src/mainwindow.cpp index ae829cf..74cc589 100644 --- a/src/mainwindow.cpp +++ b/src/mainwindow.cpp @@ -1,26 +1,37 @@ #include "mainwindow.h" #include "./ui_mainwindow.h" #include +#include +#include +#include +#include MainWindow::MainWindow(QWidget *parent) : QMainWindow(parent) , ui(new Ui::MainWindow) , audioProcessor(new AudioProcessor(this)) - , azureSpeechApi(new AzureSpeechAPI(this)) + , azureSpeechAPI(new AzureSpeechAPI(this)) + , logger(new Logger(this)) { ui->setupUi(this); - + + // 设置配置文件路径 + 
configFilePath = QCoreApplication::applicationDirPath() + "/config.ini"; + + // 加载配置 + loadConfig(); + // 连接信号和槽 connect(audioProcessor, &AudioProcessor::audioDataReceived, this, &MainWindow::onAudioDataReceived); - connect(azureSpeechApi, &AzureSpeechAPI::recognitionResult, + connect(azureSpeechAPI, &AzureSpeechAPI::recognitionResult, this, &MainWindow::onRecognitionResult); - connect(azureSpeechApi, &AzureSpeechAPI::translationResult, + connect(azureSpeechAPI, &AzureSpeechAPI::translationResult, this, &MainWindow::onTranslationResult); - connect(azureSpeechApi, &AzureSpeechAPI::error, + connect(azureSpeechAPI, &AzureSpeechAPI::error, this, &MainWindow::onError); - connect(azureSpeechApi, &AzureSpeechAPI::statusChanged, + connect(azureSpeechAPI, &AzureSpeechAPI::statusChanged, this, &MainWindow::onStatusChanged); // 连接按钮信号 @@ -28,8 +39,8 @@ MainWindow::MainWindow(QWidget *parent) this, &MainWindow::onStartButtonClicked); connect(ui->stopButton, &QPushButton::clicked, this, &MainWindow::onStopButtonClicked); - connect(ui->testConnectionButton, &QPushButton::clicked, - this, &MainWindow::onTestConnectionButtonClicked); + connect(ui->testButton, &QPushButton::clicked, this, &MainWindow::onTestButtonClicked); + connect(ui->saveConfigButton, &QPushButton::clicked, this, &MainWindow::onSaveConfigClicked); // 初始化UI状态 ui->stopButton->setEnabled(false); @@ -39,25 +50,25 @@ MainWindow::~MainWindow() { delete ui; delete audioProcessor; - delete azureSpeechApi; + delete azureSpeechAPI; + delete logger; } void MainWindow::onStartButtonClicked() { - currentAppId = ui->appIdInput->text(); - currentApiKey = ui->appIdInput->text(); - currentRegion = ui->regionInput->text(); + QString key = ui->keyEdit->text(); + QString region = ui->regionEdit->text(); - if (currentAppId.isEmpty() || currentApiKey.isEmpty() || currentRegion.isEmpty()) { + if (key.isEmpty() || region.isEmpty()) { QMessageBox::warning(this, "错误", "请填写完整的Azure Speech服务配置信息"); return; } // 初始化Azure Speech服务 - 
azureSpeechApi->initialize(currentApiKey, currentRegion); + azureSpeechAPI->initialize(key, region); // 开始语音识别和翻译 - azureSpeechApi->startRecognitionAndTranslation("zh-CN", "en-US"); + azureSpeechAPI->startRecognitionAndTranslation("en-US", "zh-CN"); // 开始音频处理 audioProcessor->startRecording(); @@ -65,7 +76,8 @@ void MainWindow::onStartButtonClicked() // 更新UI状态 ui->startButton->setEnabled(false); ui->stopButton->setEnabled(true); - ui->testConnectionButton->setEnabled(false); + ui->testButton->setEnabled(false); + ui->saveConfigButton->setEnabled(false); } void MainWindow::onStopButtonClicked() @@ -74,32 +86,18 @@ void MainWindow::onStopButtonClicked() audioProcessor->stopRecording(); // 停止语音识别和翻译 - azureSpeechApi->stopRecognitionAndTranslation(); + azureSpeechAPI->stopRecognitionAndTranslation(); // 更新UI状态 ui->startButton->setEnabled(true); ui->stopButton->setEnabled(false); - ui->testConnectionButton->setEnabled(true); -} - -void MainWindow::onTestConnectionButtonClicked() -{ - currentAppId = ui->appIdInput->text(); - currentApiKey = ui->appIdInput->text(); - currentRegion = ui->regionInput->text(); - - if (currentAppId.isEmpty() || currentApiKey.isEmpty() || currentRegion.isEmpty()) { - QMessageBox::warning(this, "错误", "请填写完整的Azure Speech服务配置信息"); - return; - } - - // 初始化Azure Speech服务 - azureSpeechApi->initialize(currentApiKey, currentRegion); + ui->testButton->setEnabled(true); + ui->saveConfigButton->setEnabled(true); } void MainWindow::onAudioDataReceived(const QByteArray &data) { - azureSpeechApi->processAudioData(data); + azureSpeechAPI->processAudioData(data); } void MainWindow::onRecognitionResult(const QString &text) @@ -121,4 +119,50 @@ void MainWindow::onError(const QString &message) void MainWindow::onStatusChanged(const QString &status) { ui->statusBar->showMessage(status); +} + +void MainWindow::onSaveConfigClicked() +{ + QString region = ui->regionEdit->text(); + QString key = ui->keyEdit->text(); + + if (region.isEmpty() || key.isEmpty()) { + 
QMessageBox::warning(this, "错误", "请填写区域和密钥"); + return; + } + + QSettings settings(configFilePath, QSettings::IniFormat); + settings.setValue("Azure/Region", region); + settings.setValue("Azure/Key", key); + + QMessageBox::information(this, "保存成功", "配置已保存"); +} + +void MainWindow::loadConfig() +{ + QSettings settings(configFilePath, QSettings::IniFormat); + QString region = settings.value("Azure/Region").toString(); + QString key = settings.value("Azure/Key").toString(); + + ui->regionEdit->setText(region); + ui->keyEdit->setText(key); + + // 如果配置已存在,启用开始按钮 + if (!region.isEmpty() && !key.isEmpty()) { + ui->startButton->setEnabled(true); + } +} + +void MainWindow::onTestButtonClicked() +{ + QString region = ui->regionEdit->text(); + QString key = ui->keyEdit->text(); + + if (region.isEmpty() || key.isEmpty()) { + QMessageBox::warning(this, "错误", "请填写区域和密钥"); + return; + } + + // 测试连接 + azureSpeechAPI->testConnection(key, region); } \ No newline at end of file diff --git a/src/mainwindow.h b/src/mainwindow.h index 74db725..c490139 100644 --- a/src/mainwindow.h +++ b/src/mainwindow.h @@ -8,6 +8,7 @@ #include #include "audioprocessor.h" #include "azurespeechapi.h" +#include "logger.h" QT_BEGIN_NAMESPACE namespace Ui { class MainWindow; } @@ -24,20 +25,21 @@ class MainWindow : public QMainWindow private slots: void onStartButtonClicked(); void onStopButtonClicked(); - void onTestConnectionButtonClicked(); void onAudioDataReceived(const QByteArray &data); void onRecognitionResult(const QString &text); void onTranslationResult(const QString &text); void onError(const QString &message); void onStatusChanged(const QString &status); + void onTestButtonClicked(); + void onSaveConfigClicked(); + void loadConfig(); private: Ui::MainWindow *ui; AudioProcessor *audioProcessor; - AzureSpeechAPI *azureSpeechApi; - QString currentAppId; - QString currentApiKey; - QString currentRegion; + AzureSpeechAPI *azureSpeechAPI; + Logger *logger; + QString configFilePath; }; #endif // 
MAINWINDOW_H \ No newline at end of file diff --git a/src/mainwindow.ui b/src/mainwindow.ui index e16763c..1205a80 100644 --- a/src/mainwindow.ui +++ b/src/mainwindow.ui @@ -16,35 +16,79 @@ - - - - - 订阅密钥: - - - - - - - - - - 区域: - - - - - - - - - - 测试连接 - - - - + + + Azure 语音服务配置 + + + + + + + + 区域 + + + + + + + 例如:eastasia + + + + + + + 密钥 + + + + + + + QLineEdit::Password + + + 输入Azure语音服务密钥 + + + + + + + + + + + 保存配置 + + + + + + + 测试连接 + + + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + +