Commit fc96896

refactor: rename content to messages
1 parent b44eb0d commit fc96896

4 files changed: +17 −19 lines

README.md

Lines changed: 3 additions & 5 deletions
@@ -11,7 +11,7 @@ Gemini-OpenAI-Proxy is a proxy designed to convert the OpenAI API protocol to th
 - [Build](#build)
 - [Deploy](#deploy)
 - [Usage](#usage)
-- [Compatibility Testing](#compatibility-testing)
+- [Compatibility](#compatibility)
 - [License](#license)
 
 ---
@@ -106,16 +106,14 @@ Gemini-OpenAI-Proxy offers a straightforward way to integrate OpenAI functionali
 
    If you wish to map `gpt-4-vision-preview` to `gemini-1.5-pro-latest`, you can configure the environment variable `GPT_4_VISION_PREVIEW = gemini-1.5-pro-latest`. This is because `gemini-1.5-pro-latest` now also supports multi-modal data.
 
-   These are the corresponding model mappings for your reference. We've aligned the models from our project with the latest offerings from Gemini, ensuring compatibility and seamless integration.
-
 4. **Handle Responses:**
    Process the responses from the Gemini-OpenAI-Proxy in the same way you would handle responses from OpenAI.
 
 Now, your application is equipped to leverage OpenAI functionality through the Gemini-OpenAI-Proxy, bridging the gap between OpenAI and applications using the Google Gemini Pro protocol.
 
-## Compatibility Testing
+## Compatibility
 
-Gemini-OpenAI-Proxy is designed to seamlessly integrate OpenAI-powered functionalities into applications using the Google Gemini Pro protocol. To ensure comprehensive compatibility, we have conducted testing specifically targeting `chatbox` and `openai translator` functionalities.
+- <https://github.com/zhu327/gemini-openai-proxy/issues/4>
 
 ---
 
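The hunk above also documents the `GPT_4_VISION_PREVIEW` environment variable for remapping `gpt-4-vision-preview` to `gemini-1.5-pro-latest`. Here is a minimal, self-contained sketch of how such an override can be read, assuming a plain `os.Getenv` lookup; the function name and the default value are illustrative, not code from this repository:

```go
package main

import (
	"fmt"
	"os"
)

// visionModel is a hypothetical helper: it returns the Gemini model mapped to
// gpt-4-vision-preview, letting GPT_4_VISION_PREVIEW override the default.
// The default "gemini-pro-vision" is an assumption for this sketch.
func visionModel() string {
	if v := os.Getenv("GPT_4_VISION_PREVIEW"); v != "" {
		return v // e.g. GPT_4_VISION_PREVIEW=gemini-1.5-pro-latest
	}
	return "gemini-pro-vision"
}

func main() {
	fmt.Println(visionModel()) // prints the model the proxy would target
}
```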

api/handler.go

Lines changed: 3 additions & 3 deletions
@@ -87,7 +87,7 @@ func ChatProxyHandler(c *gin.Context) {
 		return
 	}
 
-	content, err := req.ToGenaiContent()
+	messages, err := req.ToGenaiMessages()
 	if err != nil {
 		c.JSON(http.StatusBadRequest, openai.APIError{
 			Code:    http.StatusBadRequest,
@@ -112,7 +112,7 @@ func ChatProxyHandler(c *gin.Context) {
 	gemini := adapter.NewGeminiAdapter(client, model)
 
 	if !req.Stream {
-		resp, err := gemini.GenerateContent(ctx, req, content)
+		resp, err := gemini.GenerateContent(ctx, req, messages)
 		if err != nil {
 			log.Printf("genai generate content error %v\n", err)
 			c.JSON(http.StatusBadRequest, openai.APIError{
@@ -126,7 +126,7 @@ func ChatProxyHandler(c *gin.Context) {
 		return
 	}
 
-	dataChan, err := gemini.GenerateStreamContent(ctx, req, content)
+	dataChan, err := gemini.GenerateStreamContent(ctx, req, messages)
 	if err != nil {
 		log.Printf("genai generate content error %v\n", err)
 		c.JSON(http.StatusBadRequest, openai.APIError{
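In the streaming branch, `GenerateStreamContent` hands the handler a receive-only `<-chan string`, with generation running in a separate goroutine (see `go handleStreamIter(...)` in the chat.go diff below). Below is a minimal, runnable sketch of that producer/consumer channel pattern; the chunk data is invented, and this is not the adapter's actual implementation:

```go
package main

import "fmt"

// produce mirrors the channel pattern used by GenerateStreamContent: the
// producer owns the channel, writes chunks from a goroutine, and closes it
// when done so the consumer's range loop terminates.
func produce(chunks []string) <-chan string {
	ch := make(chan string)
	go func() {
		defer close(ch)
		for _, c := range chunks {
			ch <- c
		}
	}()
	return ch
}

func main() {
	for chunk := range produce([]string{"Hel", "lo", "!"}) {
		fmt.Print(chunk) // a real handler would flush each chunk as an SSE event
	}
	fmt.Println()
}
```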

pkg/adapter/chat.go

Lines changed: 10 additions & 10 deletions
@@ -41,15 +41,15 @@ func NewGeminiAdapter(client *genai.Client, model string) *GeminiAdapter {
 func (g *GeminiAdapter) GenerateContent(
 	ctx context.Context,
 	req *ChatCompletionRequest,
-	content []*genai.Content,
+	messages []*genai.Content,
 ) (*openai.ChatCompletionResponse, error) {
 	model := g.client.GenerativeModel(g.model)
 	setGenaiModelByOpenaiRequest(model, req)
 
 	cs := model.StartChat()
-	setGenaiChatHistory(cs, content)
+	setGenaiChatHistory(cs, messages)
 
-	genaiResp, err := cs.SendMessage(ctx, content[len(content)-1].Parts...)
+	genaiResp, err := cs.SendMessage(ctx, messages[len(messages)-1].Parts...)
 	if err != nil {
 		return nil, errors.Wrap(err, "genai send message error")
 	}
@@ -61,15 +61,15 @@ func (g *GeminiAdapter) GenerateContent(
 func (g *GeminiAdapter) GenerateStreamContent(
 	ctx context.Context,
 	req *ChatCompletionRequest,
-	content []*genai.Content,
+	messages []*genai.Content,
 ) (<-chan string, error) {
 	model := g.client.GenerativeModel(g.model)
 	setGenaiModelByOpenaiRequest(model, req)
 
 	cs := model.StartChat()
-	setGenaiChatHistory(cs, content)
+	setGenaiChatHistory(cs, messages)
 
-	iter := cs.SendMessageStream(ctx, content[len(content)-1].Parts...)
+	iter := cs.SendMessageStream(ctx, messages[len(messages)-1].Parts...)
 
 	dataChan := make(chan string)
 	go handleStreamIter(g.model, iter, dataChan)
@@ -192,10 +192,10 @@ func convertFinishReason(reason genai.FinishReason) openai.FinishReason {
 	return openaiFinishReason
 }
 
-func setGenaiChatHistory(cs *genai.ChatSession, content []*genai.Content) {
-	cs.History = make([]*genai.Content, 0, len(content))
-	if len(content) > 1 {
-		cs.History = content[:len(content)-1]
+func setGenaiChatHistory(cs *genai.ChatSession, messages []*genai.Content) {
+	cs.History = make([]*genai.Content, 0, len(messages))
+	if len(messages) > 1 {
+		cs.History = messages[:len(messages)-1]
 	}
 
 	if len(cs.History) != 0 && cs.History[len(cs.History)-1].Role != genaiRoleModel {
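Both call sites follow the same convention: `setGenaiChatHistory` seeds the chat session with every message except the last, and the last message's `Parts` are sent as the prompt via `SendMessage`/`SendMessageStream`. Here is a sketch of that split using the `genai` types from the diff; `splitMessages` is a name invented for this sketch, it assumes at least one message (as the original indexing does), and it omits the follow-up fix-up the diff shows for histories that do not end with a `model` turn:

```go
package main

import (
	"fmt"

	"github.com/google/generative-ai-go/genai"
)

// splitMessages separates chat history (all but the last message) from the
// prompt (the last message's Parts), mirroring what setGenaiChatHistory and
// the SendMessage call sites do together.
func splitMessages(messages []*genai.Content) (history []*genai.Content, prompt []genai.Part) {
	if len(messages) > 1 {
		history = messages[:len(messages)-1]
	}
	return history, messages[len(messages)-1].Parts
}

func main() {
	messages := []*genai.Content{
		{Role: "user", Parts: []genai.Part{genai.Text("Hello")}},
		{Role: "model", Parts: []genai.Part{genai.Text("Hi, how can I help?")}},
		{Role: "user", Parts: []genai.Part{genai.Text("Summarize our chat.")}},
	}
	history, prompt := splitMessages(messages)
	fmt.Println(len(history), prompt) // 2 [Summarize our chat.]
}
```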

pkg/adapter/struct.go

Lines changed: 1 addition & 1 deletion
@@ -60,7 +60,7 @@ func (req *ChatCompletionRequest) ToGenaiModel() string {
 	}
 }
 
-func (req *ChatCompletionRequest) ToGenaiContent() ([]*genai.Content, error) {
+func (req *ChatCompletionRequest) ToGenaiMessages() ([]*genai.Content, error) {
 	if req.Model == openai.GPT4VisionPreview {
 		return req.toVisionGenaiContent()
 	}
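`ToGenaiMessages` (formerly `ToGenaiContent`) converts the OpenAI-style request into `[]*genai.Content`; its body is outside this diff. The sketch below only illustrates the general shape of such a conversion: the `ChatMessage` type is an invented stand-in for the request's message struct, and the `assistant` → `model` role mapping is a common Gemini convention assumed here, not a detail taken from this commit:

```go
package main

import (
	"fmt"

	"github.com/google/generative-ai-go/genai"
)

// ChatMessage is a hypothetical stand-in for the OpenAI-style message type
// defined in pkg/adapter/struct.go; the real struct may differ.
type ChatMessage struct {
	Role    string
	Content string
}

// toGenaiMessages shows the general shape of the conversion: each message
// becomes a *genai.Content, with OpenAI's "assistant" role translated to
// Gemini's "model" role and the text wrapped as a single Part.
func toGenaiMessages(msgs []ChatMessage) []*genai.Content {
	out := make([]*genai.Content, 0, len(msgs))
	for _, m := range msgs {
		role := m.Role
		if role == "assistant" {
			role = "model"
		}
		out = append(out, &genai.Content{
			Role:  role,
			Parts: []genai.Part{genai.Text(m.Content)},
		})
	}
	return out
}

func main() {
	for _, c := range toGenaiMessages([]ChatMessage{
		{Role: "user", Content: "Hello"},
		{Role: "assistant", Content: "Hi there"},
	}) {
		fmt.Println(c.Role, c.Parts)
	}
}
```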
