From 5245d9cbcf8941d50ae6b743d85ee262b8bef7ab Mon Sep 17 00:00:00 2001
From: Martin Dvorak
Date: Thu, 15 Feb 2024 18:10:44 +0100
Subject: [PATCH] Ensure pasting of Markdown (not HTML) from the Wingman chat
 dialog #1514

---
 app/src/qt/dialogs/wingman_dialog.h    | 17 ++++++++++++++++-
 app/src/qt/main_window_presenter.cpp   |  4 ++--
 app/src/qt/note_editor_view.cpp        |  6 ++++--
 lib/src/mind/ai/llm/mock_wingman.cpp   |  4 ++--
 lib/src/mind/ai/llm/openai_wingman.cpp | 21 ++++++++++++---------
 lib/src/mind/ai/llm/wingman.h          |  6 +++---
 lib/test/src/json/json_test.cpp        | 10 +++++-----
 7 files changed, 44 insertions(+), 24 deletions(-)

diff --git a/app/src/qt/dialogs/wingman_dialog.h b/app/src/qt/dialogs/wingman_dialog.h
index aed70c90..5ecc71a0 100644
--- a/app/src/qt/dialogs/wingman_dialog.h
+++ b/app/src/qt/dialogs/wingman_dialog.h
@@ -142,7 +142,22 @@ class WingmanDialog : public QDialog // TODO rename to WingmanDialog
             cmdEdit->selectAll();
         }
     }
-    std::string getLastAnswer() { return this->lastAnswer; }
+    /**
+     * @brief Get the last answer as a Markdown string.
+     *
+     * The answer is typically pasted or appended to the Note editor,
+     * which uses Markdown (not HTML), so any HTML line breaks in the
+     * answer are converted back to Markdown newlines first.
+     */
+    std::string getLastAnswer() {
+        if(this->lastAnswer.find("<br/>") != std::string::npos) {
+            std::string decodedAnswer{this->lastAnswer};
+            replaceAll("<br/>", "\n", decodedAnswer);
+            return decodedAnswer;
+        }
+
+        return this->lastAnswer;
+    }
 
     QPushButton* getAppendButton() const { return appendButton; }
     QPushButton* getReplaceButton() const { return replaceButton; }
diff --git a/app/src/qt/main_window_presenter.cpp b/app/src/qt/main_window_presenter.cpp
index 3d97e3d0..b0eb99b2 100644
--- a/app/src/qt/main_window_presenter.cpp
+++ b/app/src/qt/main_window_presenter.cpp
@@ -2182,7 +2182,7 @@ void MainWindowPresenter::slotRunWingmanFromDialog(bool showDialog)
 
         // PUSH answer to the chat dialog
         this->wingmanDialog->appendAnswerToChat(
-            "Wingman failed to get answer from the GPT provider.<br/><br/>"+commandWingmanChat.answerHtml,
+            "Wingman failed to get answer from the GPT provider.<br/><br/>"+commandWingmanChat.answerMarkdown,
             "",
             this->wingmanDialog->getContextType(),
             true
@@ -2217,7 +2217,7 @@ void MainWindowPresenter::slotRunWingmanFromDialog(bool showDialog)
         );
     } else {
         this->wingmanDialog->appendAnswerToChat(
-            commandWingmanChat.answerHtml,
+            commandWingmanChat.answerMarkdown,
             answerDescriptor,
             this->wingmanDialog->getContextType()
         );
diff --git a/app/src/qt/note_editor_view.cpp b/app/src/qt/note_editor_view.cpp
index 27f7208e..90518ce1 100644
--- a/app/src/qt/note_editor_view.cpp
+++ b/app/src/qt/note_editor_view.cpp
@@ -209,13 +209,15 @@ void NoteEditorView::replaceSelectedText(const std::string& text)
     setTextCursor(cursor);
 }
 
-void NoteEditorView::appendAfterSelectedText(const std::string& phrase) {
+void NoteEditorView::appendAfterSelectedText(const std::string& phrase)
+{
     QTextCursor cursor = textCursor();
     cursor.movePosition(QTextCursor::EndOfBlock);
     cursor.insertText(" " + QString::fromStdString(phrase));
 }
 
-void NoteEditorView::appendAfterCursor(const std::string& phrase) {
+void NoteEditorView::appendAfterCursor(const std::string& phrase)
+{
     textCursor().insertText(QString::fromStdString(phrase));
 }
 
diff --git a/lib/src/mind/ai/llm/mock_wingman.cpp b/lib/src/mind/ai/llm/mock_wingman.cpp
index e5efe950..481f9379 100644
--- a/lib/src/mind/ai/llm/mock_wingman.cpp
+++ b/lib/src/mind/ai/llm/mock_wingman.cpp
@@ -41,9 +41,9 @@ void MockWingman::chat(CommandWingmanChat& command) {
     command.answerLlmModel.assign(this->llmModel);
     command.promptTokens=42;
    command.answerTokens=42198;
-    command.answerHtml.assign("chat(MOCK, '"+command.prompt+"')");
+    command.answerMarkdown.assign("chat(MOCK, '"+command.prompt+"')");
 
-    MF_DEBUG("MockWingman::chat() answer:" << command.answerHtml << endl);
+    MF_DEBUG("MockWingman::chat() answer:" << command.answerMarkdown << endl);
 }
 
 } // m8r namespace
diff --git a/lib/src/mind/ai/llm/openai_wingman.cpp b/lib/src/mind/ai/llm/openai_wingman.cpp
index 13442d11..b6b7ce77 100644
--- a/lib/src/mind/ai/llm/openai_wingman.cpp
+++ b/lib/src/mind/ai/llm/openai_wingman.cpp
@@ -91,7 +91,10 @@ void OpenAiWingman::curlGet(CommandWingmanChat& command) {
     */
     nlohmann::json messageSystemJSon{};
     messageSystemJSon["role"] = "system"; // system (instruct GPT who it is), user (user prompts), assistant (GPT answers)
-    messageSystemJSon["content"] = "You are a helpful assistant.";
+    messageSystemJSon["content"] =
+        // "You are a helpful assistant that returns HTML-formatted answers to the user's prompts."
+        "You are a helpful assistant."
+    ;
     // ... more messages like above (with chat history) can be created to provide context
     nlohmann::json messageUserJSon{};
     messageUserJSon["role"] = "user";
@@ -218,7 +221,7 @@ void OpenAiWingman::curlGet(CommandWingmanChat& command) {
            " '" << command.httpResponse << "'" << endl;
 
         command.httpResponse.clear();
-        command.answerHtml.clear();
+        command.answerMarkdown.clear();
         command.answerTokens = 0;
         command.answerLlmModel = llmModel;
 
@@ -266,7 +269,7 @@ void OpenAiWingman::curlGet(CommandWingmanChat& command) {
         command.status = WingmanStatusCode::WINGMAN_STATUS_CODE_ERROR;
         command.errorMessage =
             "Error: unable to parse OpenAI JSon response: '" + command.httpResponse + "'";
-        command.answerHtml.clear();
+        command.answerMarkdown.clear();
         command.answerTokens = 0;
         command.answerLlmModel = llmModel;
 
@@ -303,13 +306,13 @@ void OpenAiWingman::curlGet(CommandWingmanChat& command) {
         if(choice.contains("message")
            && choice["message"].contains("content")
         ) {
-            choice["message"]["content"].get_to(command.answerHtml);
+            choice["message"]["content"].get_to(command.answerMarkdown);
             // TODO ask GPT for HTML formatted response
             m8r::replaceAll(
                 "\n",
                 "<br/>",
-                command.answerHtml);
-            MF_DEBUG("    answer (HTML): " << command.answerHtml << endl);
+                command.answerMarkdown);
+            MF_DEBUG("    answer (HTML): " << command.answerMarkdown << endl);
         }
         if(choice.contains("finish_reason")) {
             string statusStr{};
@@ -321,7 +324,7 @@ void OpenAiWingman::curlGet(CommandWingmanChat& command) {
                 command.errorMessage.assign(
                     "OpenAI API HTTP required failed with finish_reason: "
                     + statusStr);
-                command.answerHtml.clear();
+                command.answerMarkdown.clear();
                 command.answerTokens = 0;
                 command.answerLlmModel = llmModel;
             }
@@ -329,7 +332,7 @@ void OpenAiWingman::curlGet(CommandWingmanChat& command) {
         }
     } else {
         command.status = m8r::WingmanStatusCode::WINGMAN_STATUS_CODE_ERROR;
-        command.answerHtml.clear();
+        command.answerMarkdown.clear();
         command.answerTokens = 0;
         command.answerLlmModel = llmModel;
         if(
@@ -357,7 +360,7 @@ void OpenAiWingman::chat(CommandWingmanChat& command) {
 
     curlGet(command);
 
-    MF_DEBUG("OpenAiWingman::chat() answer:" << endl << command.answerHtml << endl);
+    MF_DEBUG("OpenAiWingman::chat() answer:" << endl << command.answerMarkdown << endl);
 }
 
 } // m8r namespace
diff --git a/lib/src/mind/ai/llm/wingman.h b/lib/src/mind/ai/llm/wingman.h
index 2c4f3e2e..45ace621 100644
--- a/lib/src/mind/ai/llm/wingman.h
+++ b/lib/src/mind/ai/llm/wingman.h
@@ -71,8 +71,8 @@ enum WingmanStatusCode {
 };
 
 /**
- * Wingman chat request command pattern must be used as asynchronous requests
- * cannot handle that many parameters.
+ * Wingman chat request uses the command pattern, as asynchronous requests
+ * cannot handle that many parameters; this structure carries them instead.
  */
 struct CommandWingmanChat {
     std::string prompt;
@@ -82,7 +82,7 @@ struct CommandWingmanChat {
     std::string answerLlmModel;
     int promptTokens;
     int answerTokens;
-    std::string answerHtml;
+    std::string answerMarkdown;
 };
 
 
diff --git a/lib/test/src/json/json_test.cpp b/lib/test/src/json/json_test.cpp
index 5054a186..9122ca1f 100644
--- a/lib/test/src/json/json_test.cpp
+++ b/lib/test/src/json/json_test.cpp
@@ -74,7 +74,7 @@ TEST(JSonTestCase, ParseOpenAiResponse)
     string answerGptModel{};
     int promptTokens{};
     int answerTokens{};
-    string answerHtml{};
+    string answerMarkdown{};
 
     MF_DEBUG(
         "OpenAiWingman::curlGet() parsed response:" << endl
@@ -106,13 +106,13 @@ TEST(JSonTestCase, ParseOpenAiResponse)
         if(choice.contains("message")
            && choice["message"].contains("content")
         ) {
-            choice["message"]["content"].get_to(answerHtml);
+            choice["message"]["content"].get_to(answerMarkdown);
             // TODO ask GPT for HTML formatted response
             m8r::replaceAll(
                 "\n",
                 "<br/>",
-                answerHtml);
-            MF_DEBUG("    answer (HTML): " << answerHtml << endl);
+                answerMarkdown);
+            MF_DEBUG("    answer (HTML): " << answerMarkdown << endl);
         }
         if(choice.contains("finish_reason")) {
             string statusStr{};
@@ -142,7 +142,7 @@ TEST(JSonTestCase, ParseOpenAiResponse)
         answerGptModel);
     EXPECT_EQ(
         "LLM answer:<br/>42",
-        answerHtml);
+        answerMarkdown);
     EXPECT_EQ(
         m8r::WingmanStatusCode::WINGMAN_STATUS_CODE_OK,
         status);