跳转至

Google Gemini

1. 概述

Google 推出的多模态人工智能模型,旨在处理多种数据类型,包括文本、图像、音频、视频和代码。

2. 请求说明

  • 请求方法:POST
  • 请求地址:

    https://gateway.serevixai.ai/v1/chat/completions
    

3. 请求参数

3.1 Header 参数

参数名称 类型 必填 说明 示例值
Content-Type string 设置请求头类型,必须为 application/json application/json
Accept string 设置响应类型,建议统一为 application/json application/json
Authorization string 身份验证所需的 API_KEY,格式 Bearer $YOUR_API_KEY Bearer $YOUR_API_KEY

3.2 Body 参数 (application/json)

参数名称 类型 必填 说明 示例
model string 要使用的模型 ID。详见模型列表列出的可用版本,如 gemini-2.5-flash gemini-2.5-flash
messages array 聊天消息列表,格式与 OpenAI 兼容。数组中的每个对象包含 role(角色) 与 content(内容)。 [{"role": "user","content": "你好"}]
role string 消息角色,可选值:system、user、assistant user
content string/array 消息的具体内容。 你好,请给我讲个笑话。
temperature number 采样温度,取值 0~2。数值越大,输出越随机;数值越小,输出越集中和确定。 0.7
top_p number 另一种调节采样分布的方式,取值 0~1。和 temperature 通常二选一设置。 0.9
n number 为每条输入消息生成多少条回复。 1
stream boolean 是否开启流式输出。设置为 true 时,返回类似 ChatGPT 的流式数据。 false
stop string/array 最多可指定 4 个字符串,一旦生成的内容出现这几个字符串之一,就停止生成更多 tokens。 "\n"
max_tokens number 单次回复可生成的最大 token 数量,受模型上下文长度限制。 1024
presence_penalty number -2.0 ~ 2.0。正值会鼓励模型输出更多新话题,负值会降低输出新话题的概率。 0
frequency_penalty number -2.0 ~ 2.0。正值会降低模型重复字句的频率,负值会提高重复字句出现的概率。 0
reasoning_effort string 用来控制模型在推理任务中投入多少“计算精力”。目前只有 gemini-2.5-flash-preview-04-17 支持。可选值:low、medium、high、none,默认为 low。 low
web_search_options object 用来控制是否开启 Google 搜索结果作为回答依据。传入空对象 {} 即可开启。 {}

4. 请求示例

4.1 聊天对话

POST /v1/chat/completions
Content-Type: application/json
Accept: application/json
Authorization: Bearer $YOUR_API_KEY

{
    "model": "gemini-2.5-flash",
    "messages": [
        {
            "role": "user",
            "content": "你好,给我科普一下量子力学吧"
        }
    ]
}
# Basic chat completion request against the OpenAI-compatible endpoint.
curl https://gateway.serevixai.ai/v1/chat/completions \
    -H "Content-Type: application/json" \
    -H "Accept: application/json" \
    -H "Authorization: Bearer $YOUR_API_KEY" \
    -d "{
    \"model\": \"gemini-2.5-flash\",
    \"messages\": [{
        \"role\": \"user\",
        \"content\": \"你好,给我科普一下量子力学吧\"
    }]
}"
package main

import (
    "context"
    "fmt"

    "github.com/openai/openai-go"
    "github.com/openai/openai-go/option"
)

func main() {
    // API key for the gateway; replace with your own key.
    const apiKey = "sk-123456789012345678901234567890123456789012345678"

    // Build a client pointed at the OpenAI-compatible gateway endpoint.
    client := openai.NewClient(
        option.WithAPIKey(apiKey),
        option.WithBaseURL("https://gateway.serevixai.ai/v1"),
    )

    // Send a single-turn chat completion request.
    completion, err := client.Chat.Completions.New(
        context.Background(),
        openai.ChatCompletionNewParams{
            Model: "gemini-2.5-flash",
            Messages: []openai.ChatCompletionMessageParamUnion{
                openai.UserMessage("你好,给我科普一下量子力学吧"),
            },
        },
    )
    if err != nil {
        fmt.Println("error:", err)
        return
    }

    // Print the first choice returned by the model.
    fmt.Println(completion.Choices[0].Message.Content)
}
#!/usr/bin/env python3

from openai import OpenAI

def main():
    """Send a one-shot chat request to the Gemini gateway and print the reply."""
    client = OpenAI(
        api_key="sk-123456789012345678901234567890123456789012345678",
        base_url="https://gateway.serevixai.ai/v1",
    )

    reply = client.chat.completions.create(
        model="gemini-2.5-flash",
        messages=[{"role": "user", "content": "你好,给我科普一下量子力学吧"}],
    )

    print(reply.choices[0].message.content)


if __name__ == "__main__":
    main()

4.2 媒体文件理解

媒体文件理解功能,支持理解文档、图像、音频、视频,详细支持类型请参照下方“支持的媒体类型”。

POST /v1/chat/completions
Content-Type: application/json
Accept: application/json
Authorization: Bearer $YOUR_API_KEY

{
    "model": "gemini-2.5-flash",
    "messages": [
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "这张图片里有什么?"
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "data:image/jpeg;base64,${base64_image}"
                    }
                }
            ]
        }
    ]
}
# Encode the local image as base64, then send it as a data-URL image part
# alongside a text part in a single user message.
base64_image=$(base64 -i "Path/to/agi/image.jpeg");
curl https://gateway.serevixai.ai/v1/chat/completions \
    -H "Content-Type: application/json" \
    -H "Accept: application/json" \
    -H "Authorization: Bearer $YOUR_API_KEY" \
    -d "{
    \"model\": \"gemini-2.5-flash\",
    \"messages\": [{
        \"role\": \"user\",
        \"content\": [{
                \"type\": \"text\",
                \"text\": \"这张图片里有什么?\"
            },
            {
                \"type\": \"image_url\",
                \"image_url\": {
                    \"url\": \"data:image/jpeg;base64,${base64_image}\"
                }
            }
        ]
    }]
}"
package main

import (
    "context"
    "encoding/base64"
    "fmt"
    "os"

    "github.com/openai/openai-go"
    "github.com/openai/openai-go/option"
)

func main() {
    // Credentials and endpoint for the OpenAI-compatible gateway.
    const apiKey = "sk-123456789012345678901234567890123456789012345678"

    client := openai.NewClient(
        option.WithAPIKey(apiKey),
        option.WithBaseURL("https://gateway.serevixai.ai/v1"),
    )

    // Read the image from disk and encode it as a base64 data URL.
    const imagePath = "Path/to/agi/image.jpeg"
    raw, err := os.ReadFile(imagePath)
    if err != nil {
        fmt.Println("error:", err)
        return
    }
    dataURL := "data:image/jpeg;base64," + base64.StdEncoding.EncodeToString(raw)

    // Ask the model to describe the image: one user message carrying
    // both a text part and an image part.
    completion, err := client.Chat.Completions.New(
        context.Background(),
        openai.ChatCompletionNewParams{
            Model: "gemini-2.5-flash",
            Messages: []openai.ChatCompletionMessageParamUnion{
                openai.UserMessage([]openai.ChatCompletionContentPartUnionParam{
                    openai.TextContentPart("这张图片里有什么?"),
                    openai.ImageContentPart(openai.ChatCompletionContentPartImageImageURLParam{
                        URL: dataURL,
                    }),
                }),
            },
        },
    )
    if err != nil {
        fmt.Println("error:", err)
        return
    }

    fmt.Println(completion.Choices[0].Message.Content)
}
#!/usr/bin/env python3

import base64
from openai import OpenAI

def main():
    """Describe a local image via the Gemini gateway (OpenAI-compatible API)."""
    client = OpenAI(
        api_key="sk-123456789012345678901234567890123456789012345678",
        base_url="https://gateway.serevixai.ai/v1",
    )

    # Load the image and turn it into a base64 data URL.
    with open("Path/to/agi/image.jpeg", "rb") as image_file:
        encoded = base64.b64encode(image_file.read()).decode("utf-8")
    data_url = f"data:image/jpeg;base64,{encoded}"

    # One user message with a text part and an image part.
    reply = client.chat.completions.create(
        model="gemini-2.5-flash",
        messages=[{
            "role": "user",
            "content": [
                {"type": "text", "text": "这张图片里有什么?"},
                {"type": "image_url", "image_url": {"url": data_url}},
            ],
        }],
    )

    print(reply.choices[0].message.content)


if __name__ == "__main__":
    main()
支持的媒体类型 **图片类型:** `image/png` `image/jpeg` `image/webp` `image/heic` `image/heif` **音频类型:** `audio/wav` `audio/mp3` `audio/aiff` `audio/aac` `audio/ogg` `audio/flac` **视频类型:** `video/mp4` `video/mpeg` `video/mov` `video/avi` `video/x-flv` `video/mpg` `video/webm` `video/wmv` `video/3gpp` **文档类型:** `application/pdf` `application/x-javascript` `text/javascript` `application/x-python` `text/x-python` `text/plain` `text/html` `text/css` `text/md` `text/csv` `text/xml` `text/rtf`

4.3 函数调用

POST /v1/chat/completions
Content-Type: application/json
Accept: application/json
Authorization: Bearer $YOUR_API_KEY

{
    "model": "gemini-2.5-flash",
    "messages": [{
        "role": "user",
        "content": "What's the weather like in Boston today?"
    }],
    "tools": [{
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA"
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"]
                    }
                },
                "required": ["location"]
            }
        }
    }],
    "tool_choice": "auto"
}
# Function-calling request: declares one tool (get_current_weather) and
# lets the model decide whether to call it via tool_choice "auto".
curl https://gateway.serevixai.ai/v1/chat/completions \
    -H "Content-Type: application/json" \
    -H "Accept: application/json" \
    -H "Authorization: Bearer $YOUR_API_KEY" \
    -d "{
    \"model\": \"gemini-2.5-flash\",
    \"messages\": [{
        \"role\": \"user\",
        \"content\": \"What's the weather like in Boston today?\"
    }],
    \"tools\": [{
        \"type\": \"function\",
        \"function\": {
            \"name\": \"get_current_weather\",
            \"description\": \"Get the current weather in a given location\",
            \"parameters\": {
                \"type\": \"object\",
                \"properties\": {
                    \"location\": {
                        \"type\": \"string\",
                        \"description\": \"The city and state, e.g. San Francisco, CA\"
                    },
                    \"unit\": {
                        \"type\": \"string\",
                        \"enum\": [\"celsius\", \"fahrenheit\"]
                    }
                },
                \"required\": [\"location\"]
            }
        }
    }],
    \"tool_choice\": \"auto\"
}"
package main

import (
    "context"
    "encoding/json"
    "fmt"

    "github.com/openai/openai-go"
    "github.com/openai/openai-go/option"
    "github.com/openai/openai-go/packages/param"
    "github.com/openai/openai-go/shared"
)

func main() {
    // API key and OpenAI-compatible gateway endpoint.
    apiKey := "sk-123456789012345678901234567890123456789012345678"

    client := openai.NewClient(
        option.WithAPIKey(apiKey),
        option.WithBaseURL("https://gateway.serevixai.ai/v1"),
    )

    // Declare a single callable tool: get_current_weather(location, unit).
    // The parameter schema follows JSON Schema, as in the OpenAI
    // function-calling API.
    tools := []openai.ChatCompletionToolParam{
        {
            Type: "function",
            Function: shared.FunctionDefinitionParam{
                Name:        "get_current_weather",
                Description: param.NewOpt("Get the current weather in a given location"),
                Parameters: shared.FunctionParameters{
                    "type": "object",
                    "properties": map[string]interface{}{
                        "location": map[string]interface{}{
                            "type":        "string",
                            "description": "The city and state, e.g. San Francisco, CA",
                        },
                        "unit": map[string]interface{}{
                            "type": "string",
                            "enum": []string{"celsius", "fahrenheit"},
                        },
                    },
                    "required": []string{"location"},
                },
            },
        },
    }

    // "auto" lets the model decide whether to answer directly or call the tool.
    resp, err := client.Chat.Completions.New(
        context.Background(),
        openai.ChatCompletionNewParams{
            Model: "gemini-2.5-flash",
            Messages: []openai.ChatCompletionMessageParamUnion{
                openai.UserMessage("What's the weather like in Boston today?"),
            },
            Tools: tools,
            ToolChoice: openai.ChatCompletionToolChoiceOptionUnionParam{
                OfAuto: param.NewOpt("auto"),
            },
        },
    )
    if err != nil {
        fmt.Println("error:", err)
        return
    }

    msg := resp.Choices[0].Message

    // len on a nil slice is 0, so a separate nil check is redundant
    // (staticcheck S1009).
    if len(msg.ToolCalls) > 0 {
        for _, call := range msg.ToolCalls {
            fmt.Println("🔧 Function called:", call.Function.Name)
            fmt.Println("📥 Arguments JSON:", call.Function.Arguments)

            // Decode the model-generated arguments; report malformed JSON
            // instead of silently discarding the error.
            var args map[string]any
            if err := json.Unmarshal([]byte(call.Function.Arguments), &args); err != nil {
                fmt.Println("error:", err)
                continue
            }
            fmt.Println("📦 Parsed args:", args)
        }
    } else {
        fmt.Println("💬 Assistant reply:", msg.Content)
    }
}
#!/usr/bin/env python3

import json
from openai import OpenAI

def main():
    """Demonstrate OpenAI-style function calling through the Gemini gateway."""
    client = OpenAI(
        api_key="sk-123456789012345678901234567890123456789012345678",
        base_url="https://gateway.serevixai.ai/v1",
    )

    # One callable tool described with a JSON Schema parameter block.
    weather_tool = {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        },
    }

    # "auto" lets the model decide whether to answer directly or call the tool.
    response = client.chat.completions.create(
        model="gemini-2.5-flash",
        messages=[{"role": "user", "content": "What's the weather like in Boston today?"}],
        tools=[weather_tool],
        tool_choice="auto",
    )

    message = response.choices[0].message

    if message.tool_calls:
        for call in message.tool_calls:
            print(f"🔧 Function called: {call.function.name}")
            print(f"📥 Arguments JSON: {call.function.arguments}")

            args = json.loads(call.function.arguments)
            print(f"📦 Parsed args: {args}")
    else:
        print(f"💬 Assistant reply: {message.content}")


if __name__ == "__main__":
    main()

4.4 联网搜索

将 Gemini 模型与实时网络内容相关联,并且适用于所有可用语言。这样一来,Gemini 就可以提供更准确的回答,并引用其知识截止点之外的可验证来源。

POST /v1/chat/completions
Content-Type: application/json
Accept: application/json
Authorization: Bearer $YOUR_API_KEY

{
    "model": "gemini-2.5-flash",
    "messages": [
        {
            "role": "user",
            "content": "你好,波士顿最近天气怎么样"
        }
    ],
    "web_search_options": {}
}
# Passing an empty web_search_options object enables Google search grounding.
curl https://gateway.serevixai.ai/v1/chat/completions \
    -H "Content-Type: application/json" \
    -H "Accept: application/json" \
    -H "Authorization: Bearer $YOUR_API_KEY" \
    -d "{
    \"model\": \"gemini-2.5-flash\",
    \"messages\": [{
        \"role\": \"user\",
        \"content\": \"你好,波士顿最近天气怎么样\"
    }],
    \"web_search_options\": {}
}"
package main

import (
    "context"
    "fmt"

    "github.com/openai/openai-go"
    "github.com/openai/openai-go/option"
)

func main() {
    // Gateway credentials and OpenAI-compatible base URL.
    const apiKey = "sk-123456789012345678901234567890123456789012345678"

    client := openai.NewClient(
        option.WithAPIKey(apiKey),
        option.WithBaseURL("https://gateway.serevixai.ai/v1"),
    )

    // An empty WebSearchOptions struct enables search grounding.
    completion, err := client.Chat.Completions.New(
        context.Background(),
        openai.ChatCompletionNewParams{
            Model: "gemini-2.5-flash",
            Messages: []openai.ChatCompletionMessageParamUnion{
                openai.UserMessage("你好,波士顿最近天气怎么样"),
            },
            WebSearchOptions: openai.ChatCompletionNewParamsWebSearchOptions{},
        },
    )
    if err != nil {
        fmt.Println("error:", err)
        return
    }

    fmt.Println("💬 Assistant reply:")
    fmt.Println(completion.Choices[0].Message.Content)

    // Report token accounting when the gateway returned usage data.
    if usage := completion.Usage; usage.PromptTokens > 0 {
        fmt.Println("\n📊 Token usage:")
        fmt.Printf("  - Prompt tokens: %d\n", usage.PromptTokens)
        fmt.Printf("  - Completion tokens: %d\n", usage.CompletionTokens)
        fmt.Printf("  - Total tokens: %d\n", usage.TotalTokens)
    }
}
#!/usr/bin/env python3

from openai import OpenAI

def main():
    """Ask a weather question with web-search grounding enabled."""
    client = OpenAI(
        api_key="sk-123456789012345678901234567890123456789012345678",
        base_url="https://gateway.serevixai.ai/v1",
    )

    # Passing an empty web_search_options object turns on search grounding.
    reply = client.chat.completions.create(
        model="gemini-2.5-flash",
        messages=[{"role": "user", "content": "你好,波士顿最近天气怎么样"}],
        web_search_options={},
    )

    print(reply.choices[0].message.content)


if __name__ == "__main__":
    main()

5. 响应示例

{
    "id": "chatcmpl-1234567890",
    "object": "chat.completion",
    "created": 1699999999,
    "model": "gemini-2.5-flash",
    "choices": [
        {
            "message": {
                "role": "assistant",
                "content": "量子力学是研究微观世界的物理学分支……"
            },
            "finish_reason": "stop"
        }
    ],
    "usage": {
        "prompt_tokens": 10,
        "completion_tokens": 30,
        "total_tokens": 40
    }
}