From aecc386926356022c7af67dc9c3985860ddacd96 Mon Sep 17 00:00:00 2001
From: Yurun
Date: Sat, 16 Mar 2024 09:39:19 +0800
Subject: [PATCH] Support Google Gemini
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 README.md                                     |   9 +-
 server/Module/OpenAI/Client/Gemini/Client.php | 139 ++++++++++++++++++
 server/composer.json                          |   1 +
 3 files changed, 146 insertions(+), 3 deletions(-)
 create mode 100644 server/Module/OpenAI/Client/Gemini/Client.php

diff --git a/README.md b/README.md
index 77ef9de..dc84a50 100644
--- a/README.md
+++ b/README.md
@@ -88,9 +88,10 @@ imi-ai is an open-source ChatGPT project supporting chat, Q&A, writing code, ...
 
 ### Supported model vendors
 
-* [x] OpenAI
-* [x] Swoole AI
-* [x] ChatGLM3
+* [x] [OpenAI](https://openai.com/)
+* [x] [Swoole AI](https://ai.swoole.com/)
+* [x] [ChatGLM3](https://github.com/THUDM/ChatGLM3)
+* [x] [Google Gemini](https://aistudio.google.com/)
 
 ### Others
 
@@ -265,6 +266,8 @@ npm run build-only
 
 ### Admin panel
 
+The default username and password for the admin panel are both `admin`
+
 **Directory:** `admin`
 
 **Environment requirements:**
diff --git a/server/Module/OpenAI/Client/Gemini/Client.php b/server/Module/OpenAI/Client/Gemini/Client.php
new file mode 100644
index 0000000..5be8e7a
--- /dev/null
+++ b/server/Module/OpenAI/Client/Gemini/Client.php
@@ -0,0 +1,139 @@
+        if (null !== $proxy = $api->getProxy())
+        {
+            $httpConfig['proxy'] = $proxy;
+        }
+        $factory = \Gemini::factory()->withApiKey($api->getApiKey())
+            ->withHttpClient($client = new \GuzzleHttp\Client($httpConfig))
+            ->withStreamHandler(fn (RequestInterface $request): ResponseInterface => $client->send($request, [
+                'stream' => true, // Allows to provide a custom stream handler for the http client.
+            ]));
+        $baseUrl = $api->getBaseUrl();
+        if (!Text::isEmpty($baseUrl))
+        {
+            $factory->withBaseUrl($baseUrl);
+        }
+        $this->client = $factory->make();
+    }
+
+    public function getApi(): Api
+    {
+        return $this->api;
+    }
+
+    public function chat(array $params, ?int &$inputTokens = null, ?int &$outputTokens = null): \Iterator
+    {
+        try
+        {
+            $inputTokens = $outputTokens = 0;
+            $model = $this->client->generativeModel($params['model']);
+            $history = [];
+            $prevContent = null;
+            foreach ($params['messages'] as $message)
+            {
+                if ('system' === $message['role'])
+                {
+                    $prevContent = $message['content']; // Gemini has no system role; fold it into the next message
+                    continue;
+                }
+                $content = $message['content'];
+                if (null !== $prevContent)
+                {
+                    $content = $prevContent . \PHP_EOL . $content;
+                    $prevContent = null; // merge the system prompt once, not into every later message
+                }
+                $history[] = Content::parse($content, 'user' === $message['role'] ? Role::USER : Role::MODEL);
+                $inputTokens += mb_strlen($content);
+            }
+
+            $generationConfig = new GenerationConfig(
+                // stopSequences: [
+                //     'Title',
+                // ],
+                // maxOutputTokens: 800,
+                temperature: $params['temperature'] ?? null,
+                topP: $params['top_p'] ?? null,
+                // topK: 10
+            );
+
+            $stream = $model->withGenerationConfig($generationConfig)
+                ->streamGenerateContent(...$history);
+
+            yield [ // opening chunk: role only, no content yet
+                'choices' => [
+                    [
+                        'delta' => [
+                            'role' => Role::MODEL->value,
+                            'content' => '',
+                        ],
+                    ],
+                ],
+            ];
+
+            $contents = '';
+            foreach ($stream as $response)
+            {
+                yield [
+                    'choices' => [
+                        [
+                            'delta' => [
+                                'content' => $content = $response->text(),
+                            ],
+                        ],
+                    ],
+                ];
+                $contents .= $content;
+            }
+            $outputTokens = mb_strlen($contents); // character count as a token approximation
+            yield [ // closing chunk: empty delta with finish_reason=stop
+                'choices' => [
+                    [
+                        'delta' => [],
+                        'finish_reason' => 'stop',
+                    ],
+                ],
+            ];
+        }
+        catch (\Throwable $th)
+        {
+            $this->api->failed();
+            throw $th;
+        }
+    }
+
+    public function embedding(array $params): array
+    {
+        throw new \RuntimeException('Unsupported method ' . __METHOD__);
+    }
+
+    public function calcTokens(string $string, string $model): int
+    {
+        return mb_strlen($string);
+    }
+}
diff --git a/server/composer.json b/server/composer.json
index bef23f1..52e89fb 100644
--- a/server/composer.json
+++ b/server/composer.json
@@ -8,6 +8,7 @@
         "ext-swoole": ">=5.0",
         "ankane/pgvector": "^0.1.2",
         "gemorroj/archive7z": "^5.6",
+        "google-gemini-php/client": "^1.0",
         "guzzlehttp/guzzle": "^7.5",
         "hashids/hashids": "^5.0",
         "imiphp/imi": "~2.1.0",
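
---

Usage note (not part of the patch): the adapter exposes Gemini through the
project's OpenAI-style streaming interface. A minimal consumption sketch,
assuming the server autoloads this module under the `app\` namespace root
and that `$api` is the project's Api model carrying the Gemini key, proxy
and base URL (both assumptions, not confirmed by the diff):

    <?php
    // Hypothetical usage; the namespace root and the $api wiring are assumptions.
    $client = new \app\Module\OpenAI\Client\Gemini\Client($api);

    $inputTokens = $outputTokens = 0;
    $stream = $client->chat([
        'model'       => 'gemini-pro', // illustrative model name
        'temperature' => 0.7,
        'messages'    => [
            ['role' => 'system', 'content' => 'You are a helpful assistant.'],
            ['role' => 'user', 'content' => 'Hello!'],
        ],
    ], $inputTokens, $outputTokens);

    foreach ($stream as $chunk)
    {
        // First chunk carries only the role, middle chunks carry content
        // deltas, and the last chunk is an empty delta with finish_reason=stop.
        echo $chunk['choices'][0]['delta']['content'] ?? '';
    }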
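For context, the google-gemini-php calls the adapter builds on look roughly
like this sketch. It is restricted to APIs that appear in the diff, except
Gemini::client(), the package's documented simple constructor; the key,
model name and prompt are placeholders:

    <?php
    use Gemini\Data\Content;
    use Gemini\Data\GenerationConfig;
    use Gemini\Enums\Role;

    // The patch uses Gemini::factory() instead of Gemini::client() so it
    // can inject a Guzzle client with proxy support and a stream handler.
    $gemini = \Gemini::client('YOUR_API_KEY');

    $stream = $gemini->generativeModel('gemini-pro')
        ->withGenerationConfig(new GenerationConfig(temperature: 0.7))
        ->streamGenerateContent(Content::parse('Hello!', Role::USER));

    foreach ($stream as $response)
    {
        echo $response->text(); // incremental text, as concatenated by chat()
    }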
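Design note: calcTokens() and the $inputTokens/$outputTokens counters rely
on mb_strlen(), so they report multibyte character counts rather than real
Gemini tokens and should be read as a rough usage approximation:

    <?php
    // mb_strlen counts characters, not model tokens.
    var_dump(mb_strlen('Hello!')); // int(6)
    var_dump(mb_strlen('你好'));   // int(2)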