Skip to content

Commit

Permalink
Ensuring pasting of Markdown from Wingman chat dialog (not HTML) #1514
Browse files Browse the repository at this point in the history
  • Loading branch information
dvorka committed Feb 15, 2024
1 parent 1bbf31a commit 5245d9c
Show file tree
Hide file tree
Showing 7 changed files with 44 additions and 24 deletions.
17 changes: 16 additions & 1 deletion app/src/qt/dialogs/wingman_dialog.h
Original file line number Diff line number Diff line change
Expand Up @@ -142,7 +142,22 @@ class WingmanDialog : public QDialog // TODO rename to WingmanDialog
cmdEdit->selectAll();
}
}
std::string getLastAnswer() { return this->lastAnswer; }
/**
 * @brief Get the last answer as a Markdown string.
 *
 * The answer is typically pasted or appended to the Note editor,
 * which uses Markdown format (not HTML), so any HTML line breaks
 * (`<br/>`) are converted to Markdown newlines before returning.
 */
std::string getLastAnswer() {
    // work on a copy so the stored answer keeps its original form
    std::string answer{this->lastAnswer};
    if(answer.find("<br/>") != std::string::npos) {
        replaceAll("<br/>", "\n", answer);
    }
    return answer;
}

QPushButton* getAppendButton() const { return appendButton; }
QPushButton* getReplaceButton() const { return replaceButton; }
Expand Down
4 changes: 2 additions & 2 deletions app/src/qt/main_window_presenter.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2182,7 +2182,7 @@ void MainWindowPresenter::slotRunWingmanFromDialog(bool showDialog)

// PUSH answer to the chat dialog
this->wingmanDialog->appendAnswerToChat(
"Wingman failed to get answer from the GPT provider.<br/><br/>"+commandWingmanChat.answerHtml,
"Wingman failed to get answer from the GPT provider.<br/><br/>"+commandWingmanChat.answerMarkdown,
"",
this->wingmanDialog->getContextType(),
true
Expand Down Expand Up @@ -2217,7 +2217,7 @@ void MainWindowPresenter::slotRunWingmanFromDialog(bool showDialog)
);
} else {
this->wingmanDialog->appendAnswerToChat(
commandWingmanChat.answerHtml,
commandWingmanChat.answerMarkdown,
answerDescriptor,
this->wingmanDialog->getContextType()
);
Expand Down
6 changes: 4 additions & 2 deletions app/src/qt/note_editor_view.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -209,13 +209,15 @@ void NoteEditorView::replaceSelectedText(const std::string& text)
setTextCursor(cursor);
}

void NoteEditorView::appendAfterSelectedText(const std::string& phrase) {
void NoteEditorView::appendAfterSelectedText(const std::string& phrase)
{
QTextCursor cursor = textCursor();
cursor.movePosition(QTextCursor::EndOfBlock);
cursor.insertText(" " + QString::fromStdString(phrase));
}

void NoteEditorView::appendAfterCursor(const std::string& phrase) {
void NoteEditorView::appendAfterCursor(const std::string& phrase)
{
textCursor().insertText(QString::fromStdString(phrase));
}

Expand Down
4 changes: 2 additions & 2 deletions lib/src/mind/ai/llm/mock_wingman.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -41,9 +41,9 @@ void MockWingman::chat(CommandWingmanChat& command) {
command.answerLlmModel.assign(this->llmModel);
command.promptTokens=42;
command.answerTokens=42198;
command.answerHtml.assign("chat(MOCK, '"+command.prompt+"')");
command.answerMarkdown.assign("chat(MOCK, '"+command.prompt+"')");

MF_DEBUG("MockWingman::chat() answer:" << command.answerHtml << endl);
MF_DEBUG("MockWingman::chat() answer:" << command.answerMarkdown << endl);
}

} // m8r namespace
21 changes: 12 additions & 9 deletions lib/src/mind/ai/llm/openai_wingman.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,10 @@ void OpenAiWingman::curlGet(CommandWingmanChat& command) {
*/
nlohmann::json messageSystemJSon{};
messageSystemJSon["role"] = "system"; // system (instruct GPT who it is), user (user prompts), assistant (GPT answers)
messageSystemJSon["content"] = "You are a helpful assistant.";
messageSystemJSon["content"] =
// "You are a helpful assistant that returns HTML-formatted answers to the user's prompts."
"You are a helpful assistant."
;
// ... more messages like above (with chat history) can be created to provide context
nlohmann::json messageUserJSon{};
messageUserJSon["role"] = "user";
Expand Down Expand Up @@ -218,7 +221,7 @@ void OpenAiWingman::curlGet(CommandWingmanChat& command) {
" '" << command.httpResponse << "'" << endl;

command.httpResponse.clear();
command.answerHtml.clear();
command.answerMarkdown.clear();
command.answerTokens = 0;
command.answerLlmModel = llmModel;

Expand Down Expand Up @@ -266,7 +269,7 @@ void OpenAiWingman::curlGet(CommandWingmanChat& command) {

command.status = WingmanStatusCode::WINGMAN_STATUS_CODE_ERROR;
command.errorMessage = "Error: unable to parse OpenAI JSon response: '" + command.httpResponse + "'";
command.answerHtml.clear();
command.answerMarkdown.clear();
command.answerTokens = 0;
command.answerLlmModel = llmModel;

Expand Down Expand Up @@ -303,13 +306,13 @@ void OpenAiWingman::curlGet(CommandWingmanChat& command) {
if(choice.contains("message")
&& choice["message"].contains("content")
) {
choice["message"]["content"].get_to(command.answerHtml);
choice["message"]["content"].get_to(command.answerMarkdown);
// TODO ask GPT for HTML formatted response
m8r::replaceAll(
"\n",
"<br/>",
command.answerHtml);
MF_DEBUG(" answer (HTML): " << command.answerHtml << endl);
command.answerMarkdown);
MF_DEBUG(" answer (HTML): " << command.answerMarkdown << endl);
}
if(choice.contains("finish_reason")) {
string statusStr{};
Expand All @@ -321,15 +324,15 @@ void OpenAiWingman::curlGet(CommandWingmanChat& command) {
command.errorMessage.assign(
"OpenAI API HTTP required failed with finish_reason: "
+ statusStr);
command.answerHtml.clear();
command.answerMarkdown.clear();
command.answerTokens = 0;
command.answerLlmModel = llmModel;
}
MF_DEBUG(" status: " << command.status << endl);
}
} else {
command.status = m8r::WingmanStatusCode::WINGMAN_STATUS_CODE_ERROR;
command.answerHtml.clear();
command.answerMarkdown.clear();
command.answerTokens = 0;
command.answerLlmModel = llmModel;
if(
Expand Down Expand Up @@ -357,7 +360,7 @@ void OpenAiWingman::chat(CommandWingmanChat& command) {

curlGet(command);

MF_DEBUG("OpenAiWingman::chat() answer:" << endl << command.answerHtml << endl);
MF_DEBUG("OpenAiWingman::chat() answer:" << endl << command.answerMarkdown << endl);
}

} // m8r namespace
6 changes: 3 additions & 3 deletions lib/src/mind/ai/llm/wingman.h
Original file line number Diff line number Diff line change
Expand Up @@ -71,8 +71,8 @@ enum WingmanStatusCode {
};

/**
* Wingman chat request command pattern must be used as asynchronous requests
* cannot handle that many parameters.
Wingman chat request command pattern must be used, as asynchronous
requests cannot handle that many parameters; this structure is used instead.
*/
struct CommandWingmanChat {
std::string prompt;
Expand All @@ -82,7 +82,7 @@ struct CommandWingmanChat {
std::string answerLlmModel;
int promptTokens;
int answerTokens;
std::string answerHtml;
std::string answerMarkdown;
};


Expand Down
10 changes: 5 additions & 5 deletions lib/test/src/json/json_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ TEST(JSonTestCase, ParseOpenAiResponse)
string answerGptModel{};
int promptTokens{};
int answerTokens{};
string answerHtml{};
string answerMarkdown{};

MF_DEBUG(
"OpenAiWingman::curlGet() parsed response:" << endl
Expand Down Expand Up @@ -106,13 +106,13 @@ TEST(JSonTestCase, ParseOpenAiResponse)
if(choice.contains("message")
&& choice["message"].contains("content")
) {
choice["message"]["content"].get_to(answerHtml);
choice["message"]["content"].get_to(answerMarkdown);
// TODO ask GPT for HTML formatted response
m8r::replaceAll(
"\n",
"<br/>",
answerHtml);
MF_DEBUG(" answer (HTML): " << answerHtml << endl);
answerMarkdown);
MF_DEBUG(" answer (HTML): " << answerMarkdown << endl);
}
if(choice.contains("finish_reason")) {
string statusStr{};
Expand Down Expand Up @@ -142,7 +142,7 @@ TEST(JSonTestCase, ParseOpenAiResponse)
answerGptModel);
EXPECT_EQ(
"LLM answer:<br/>42",
answerHtml);
answerMarkdown);
EXPECT_EQ(
m8r::WingmanStatusCode::WINGMAN_STATUS_CODE_OK,
status);
Expand Down

0 comments on commit 5245d9c

Please sign in to comment.