Google Gemini¶
1. Overview¶
A multimodal model family from Google that can process text, images, audio, video, and code.
2. Request¶
- Method: POST
- Endpoint: https://gateway.serevixai.ai/v1/chat/completions
3. Parameters¶
3.1 Header Parameters¶
| Parameter | Type | Required | Description | Example |
|---|---|---|---|---|
| Content-Type | string | Yes | Sets the request content type. It must be application/json | application/json |
| Accept | string | Yes | Sets the response content type. The recommended value is application/json | application/json |
| Authorization | string | Yes | API key required for authentication, in the format Bearer $YOUR_API_KEY. | Bearer $YOUR_API_KEY |
3.2 Body Parameters (application/json)¶
| Parameter | Type | Required | Description | Example |
|---|---|---|---|---|
| model | string | Yes | The model ID to use. See Model List for available versions, such as gemini-2.5-flash. | gemini-2.5-flash |
| messages | array | Yes | A chat message list in an OpenAI-compatible format. Each object contains role and content. | [{"role": "user","content": "Hello"}] |
| role | string | No | Message role. Supported values: system, user, and assistant. | user |
| content | string/array | No | The message content. | Hello, tell me a joke. |
| temperature | number | No | Sampling temperature in the range 0-2. Higher values make the output more random, while lower values make it more focused and deterministic. | 0.7 |
| top_p | number | No | Another way to control the sampling distribution, in the range 0-1. It is usually used instead of temperature. | 0.9 |
| n | number | No | How many completions to generate for each input message. | 1 |
| stream | boolean | No | Whether to enable streaming output. When set to true, the API returns ChatGPT-style streamed data. | false |
| stop | string | No | You can specify up to 4 stop strings. Generation stops when one of them appears in the output. | "\n" |
| max_tokens | number | No | The maximum number of tokens that can be generated in a single reply, subject to the model context window. | 1024 |
| presence_penalty | number | No | -2.0 to 2.0. Positive values encourage the model to introduce new topics, while negative values reduce that tendency. | 0 |
| frequency_penalty | number | No | -2.0 to 2.0. Positive values reduce repetition, while negative values increase it. | 0 |
| reasoning_effort | string | No | Controls how much reasoning effort the model uses. Currently only gemini-2.5-flash-preview-04-17 supports this field. Supported values: low, medium, high, and none. The default is low. | low |
| web_search_options | object | No | Controls whether Google Search grounding is enabled. | {} |
4. Request Examples¶
4.1 Chat¶
POST /v1/chat/completions
Content-Type: application/json
Accept: application/json
Authorization: Bearer $YOUR_API_KEY
{
"model": "gemini-2.5-flash",
"messages": [
{
"role": "user",
"content": "Hello, can you explain quantum mechanics to me?"
}
]
}
curl https://gateway.serevixai.ai/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Accept: application/json" \
-H "Authorization: Bearer $YOUR_API_KEY" \
-d "{
\"model\": \"gemini-2.5-flash\",
\"messages\": [{
\"role\": \"user\",
\"content\": \"Hello, can you explain quantum mechanics to me?\"
}]
}"
package main
import (
"context"
"fmt"
"github.com/openai/openai-go"
"github.com/openai/openai-go/option"
)
func main() {
apiKey := "sk-123456789012345678901234567890123456789012345678"
client := openai.NewClient(
option.WithAPIKey(apiKey),
option.WithBaseURL("https://gateway.serevixai.ai/v1"),
)
resp, err := client.Chat.Completions.New(
context.Background(),
openai.ChatCompletionNewParams{
Model: "gemini-2.5-flash",
Messages: []openai.ChatCompletionMessageParamUnion{
openai.UserMessage("Hello, can you explain quantum mechanics to me?"),
},
},
)
if err != nil {
fmt.Println("error:", err)
return
}
fmt.Println(resp.Choices[0].Message.Content)
}
#!/usr/bin/env python3
"""Basic chat completion example for the Gemini gateway (OpenAI-compatible API)."""
from openai import OpenAI


def main():
    # Placeholder key — substitute your real gateway API key.
    api_key = "sk-123456789012345678901234567890123456789012345678"
    client = OpenAI(api_key=api_key, base_url="https://gateway.serevixai.ai/v1")

    chat = client.chat.completions.create(
        model="gemini-2.5-flash",
        messages=[{"role": "user", "content": "Hello, can you explain quantum mechanics to me?"}],
    )
    # Print the first (and, with default n=1, only) choice.
    print(chat.choices[0].message.content)


if __name__ == "__main__":
    main()
4.2 Media Understanding¶
The Media Understanding capability supports documents, images, audio, and video. See the supported media types below for details.
POST /v1/chat/completions
Content-Type: application/json
Accept: application/json
Authorization: Bearer $YOUR_API_KEY
{
"model": "gemini-2.5-flash",
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": "What is in this image?"
},
{
"type": "image_url",
"image_url": {
"url": "data:image/jpeg;base64,${base64_image}"
}
}
]
}
]
}
base64_image=$(base64 -i "Path/to/agi/image.jpeg");
curl https://gateway.serevixai.ai/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Accept: application/json" \
-H "Authorization: Bearer $YOUR_API_KEY" \
-d "{
\"model\": \"gemini-2.5-flash\",
\"messages\": [{
\"role\": \"user\",
\"content\": [{
\"type\": \"text\",
\"text\": \"What is in this image?\"
},
{
\"type\": \"image_url\",
\"image_url\": {
\"url\": \"data:image/jpeg;base64,${base64_image}\"
}
}
]
}]
}"
package main
import (
"context"
"encoding/base64"
"fmt"
"os"
"github.com/openai/openai-go"
"github.com/openai/openai-go/option"
)
func main() {
apiKey := "sk-123456789012345678901234567890123456789012345678"
client := openai.NewClient(
option.WithAPIKey(apiKey),
option.WithBaseURL("https://gateway.serevixai.ai/v1"),
)
imagePath := "Path/to/agi/image.jpeg"
imageBytes, err := os.ReadFile(imagePath)
if err != nil {
fmt.Println("error:", err)
return
}
base64Image := base64.StdEncoding.EncodeToString(imageBytes)
imageURL := "data:image/jpeg;base64," + base64Image
resp, err := client.Chat.Completions.New(
context.Background(),
openai.ChatCompletionNewParams{
Model: "gemini-2.5-flash",
Messages: []openai.ChatCompletionMessageParamUnion{
openai.UserMessage([]openai.ChatCompletionContentPartUnionParam{
openai.TextContentPart("What is in this image?"),
openai.ImageContentPart(openai.ChatCompletionContentPartImageImageURLParam{
URL: imageURL,
}),
}),
},
},
)
if err != nil {
fmt.Println("error:", err)
return
}
fmt.Println(resp.Choices[0].Message.Content)
}
#!/usr/bin/env python3
"""Image understanding example: send a base64 data-URL image to the Gemini gateway."""
import base64

from openai import OpenAI


def main():
    # Placeholder key — substitute your real gateway API key.
    api_key = "sk-123456789012345678901234567890123456789012345678"
    client = OpenAI(api_key=api_key, base_url="https://gateway.serevixai.ai/v1")

    image_path = "Path/to/agi/image.jpeg"
    # Embed the image as a base64 data URL so it can travel inline in JSON.
    with open(image_path, "rb") as fh:
        encoded = base64.b64encode(fh.read()).decode("utf-8")
    image_url = f"data:image/jpeg;base64,{encoded}"

    # A multimodal message mixes text and image content parts.
    content = [
        {"type": "text", "text": "What is in this image?"},
        {"type": "image_url", "image_url": {"url": image_url}},
    ]
    response = client.chat.completions.create(
        model="gemini-2.5-flash",
        messages=[{"role": "user", "content": content}],
    )
    print(response.choices[0].message.content)


if __name__ == "__main__":
    main()
Supported Media Types
**Image types:** `image/png` `image/jpeg` `image/webp` `image/heic` `image/heif`

**Audio types:** `audio/wav` `audio/mp3` `audio/aiff` `audio/aac` `audio/ogg` `audio/flac`

**Video types:** `video/mp4` `video/mpeg` `video/mov` `video/avi` `video/x-flv` `video/mpg` `video/webm` `video/wmv` `video/3gpp`

**Document types:** `application/pdf` `application/x-javascript` `text/javascript` `application/x-python` `text/x-python` `text/plain` `text/html` `text/css` `text/md` `text/csv` `text/xml` `text/rtf`

4.3 Function Calling¶
POST /v1/chat/completions
Content-Type: application/json
Accept: application/json
Authorization: Bearer $YOUR_API_KEY
{
"model": "gemini-2.5-flash",
"messages": [{
"role": "user",
"content": "What's the weather like in Boston today?"
}],
"tools": [{
"type": "function",
"function": {
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA"
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"]
}
},
"required": ["location"]
}
}
}],
"tool_choice": "auto"
}
curl https://gateway.serevixai.ai/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Accept: application/json" \
-H "Authorization: Bearer $YOUR_API_KEY" \
-d "{
\"model\": \"gemini-2.5-flash\",
\"messages\": [{
\"role\": \"user\",
\"content\": \"What's the weather like in Boston today?\"
}],
\"tools\": [{
\"type\": \"function\",
\"function\": {
\"name\": \"get_current_weather\",
\"description\": \"Get the current weather in a given location\",
\"parameters\": {
\"type\": \"object\",
\"properties\": {
\"location\": {
\"type\": \"string\",
\"description\": \"The city and state, e.g. San Francisco, CA\"
},
\"unit\": {
\"type\": \"string\",
\"enum\": [\"celsius\", \"fahrenheit\"]
}
},
\"required\": [\"location\"]
}
}
}],
\"tool_choice\": \"auto\"
}"
package main
import (
"context"
"encoding/json"
"fmt"
"github.com/openai/openai-go"
"github.com/openai/openai-go/option"
"github.com/openai/openai-go/packages/param"
"github.com/openai/openai-go/shared"
)
func main() {
apiKey := "sk-123456789012345678901234567890123456789012345678"
client := openai.NewClient(
option.WithAPIKey(apiKey),
option.WithBaseURL("https://gateway.serevixai.ai/v1"),
)
tools := []openai.ChatCompletionToolParam{
{
Type: "function",
Function: shared.FunctionDefinitionParam{
Name: "get_current_weather",
Description: param.NewOpt("Get the current weather in a given location"),
Parameters: shared.FunctionParameters{
"type": "object",
"properties": map[string]interface{}{
"location": map[string]interface{}{
"type": "string",
"description": "The city and state, e.g. San Francisco, CA",
},
"unit": map[string]interface{}{
"type": "string",
"enum": []string{"celsius", "fahrenheit"},
},
},
"required": []string{"location"},
},
},
},
}
resp, err := client.Chat.Completions.New(
context.Background(),
openai.ChatCompletionNewParams{
Model: "gemini-2.5-flash",
Messages: []openai.ChatCompletionMessageParamUnion{
openai.UserMessage("What's the weather like in Boston today?"),
},
Tools: tools,
ToolChoice: openai.ChatCompletionToolChoiceOptionUnionParam{
OfAuto: param.NewOpt("auto"),
},
},
)
if err != nil {
fmt.Println("error:", err)
return
}
msg := resp.Choices[0].Message
if msg.ToolCalls != nil && len(msg.ToolCalls) > 0 {
for _, call := range msg.ToolCalls {
fmt.Println("π§ Function called:", call.Function.Name)
fmt.Println("π₯ Arguments JSON:", call.Function.Arguments)
// If you want to parse the arguments
var args map[string]any
_ = json.Unmarshal([]byte(call.Function.Arguments), &args)
fmt.Println("π¦ Parsed args:", args)
}
} else {
fmt.Println("π¬ Assistant reply:", msg.Content)
}
}
#!/usr/bin/env python3
"""Function-calling (tools) example for the Gemini gateway."""
import json

from openai import OpenAI

# One tool: a weather lookup with a required "location" and an optional "unit".
WEATHER_TOOL = {
    "type": "function",
    "function": {
        "name": "get_current_weather",
        "description": "Get the current weather in a given location",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city and state, e.g. San Francisco, CA",
                },
                "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
            },
            "required": ["location"],
        },
    },
}


def main():
    # Placeholder key — substitute your real gateway API key.
    api_key = "sk-123456789012345678901234567890123456789012345678"
    client = OpenAI(api_key=api_key, base_url="https://gateway.serevixai.ai/v1")

    response = client.chat.completions.create(
        model="gemini-2.5-flash",
        messages=[{"role": "user", "content": "What's the weather like in Boston today?"}],
        tools=[WEATHER_TOOL],
        tool_choice="auto",
    )

    # The model may answer with tool calls instead of plain text.
    msg = response.choices[0].message
    if msg.tool_calls:
        for call in msg.tool_calls:
            print(f"π§ Function called: {call.function.name}")
            print(f"π₯ Arguments JSON: {call.function.arguments}")
            args = json.loads(call.function.arguments)
            print(f"π¦ Parsed args: {args}")
    else:
        print(f"π¬ Assistant reply: {msg.content}")


if __name__ == "__main__":
    main()
4.4 Google Search¶
Connect Gemini models to live web content across all supported languages so responses can be more accurate and cite verifiable sources beyond the model's cutoff.
POST /v1/chat/completions
Content-Type: application/json
Accept: application/json
Authorization: Bearer $YOUR_API_KEY
{
"model": "gemini-2.5-flash",
"messages": [
{
"role": "user",
"content": "Hello, how is the weather in Boston recently?"
}
],
"web_search_options": {}
}
curl https://gateway.serevixai.ai/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Accept: application/json" \
-H "Authorization: Bearer $YOUR_API_KEY" \
-d "{
\"model\": \"gemini-2.5-flash\",
\"messages\": [{
\"role\": \"user\",
\"content\": \"Hello, how is the weather in Boston recently?\"
}],
\"web_search_options\": {}
}"
package main
import (
"context"
"fmt"
"github.com/openai/openai-go"
"github.com/openai/openai-go/option"
)
func main() {
apiKey := "sk-123456789012345678901234567890123456789012345678"
client := openai.NewClient(
option.WithAPIKey(apiKey),
option.WithBaseURL("https://gateway.serevixai.ai/v1"),
)
resp, err := client.Chat.Completions.New(
context.Background(),
openai.ChatCompletionNewParams{
Model: "gemini-2.5-flash",
Messages: []openai.ChatCompletionMessageParamUnion{
openai.UserMessage("Hello, how is the weather in Boston recently?"),
},
WebSearchOptions: openai.ChatCompletionNewParamsWebSearchOptions{},
},
)
if err != nil {
fmt.Println("error:", err)
return
}
fmt.Println("π¬ Assistant reply:")
fmt.Println(resp.Choices[0].Message.Content)
if resp.Usage.PromptTokens > 0 {
fmt.Println("\nπ Token usage:")
fmt.Printf(" - Prompt tokens: %d\n", resp.Usage.PromptTokens)
fmt.Printf(" - Completion tokens: %d\n", resp.Usage.CompletionTokens)
fmt.Printf(" - Total tokens: %d\n", resp.Usage.TotalTokens)
}
}
#!/usr/bin/env python3
"""Google Search grounding example for the Gemini gateway."""
from openai import OpenAI


def main():
    # Placeholder key — substitute your real gateway API key.
    api_key = "sk-123456789012345678901234567890123456789012345678"
    client = OpenAI(api_key=api_key, base_url="https://gateway.serevixai.ai/v1")

    # An empty web_search_options object is enough to enable grounding.
    response = client.chat.completions.create(
        model="gemini-2.5-flash",
        messages=[{"role": "user", "content": "Hello, how is the weather in Boston recently?"}],
        web_search_options={},
    )
    print(response.choices[0].message.content)


if __name__ == "__main__":
    main()
5. Response Example¶
{
"id": "chatcmpl-1234567890",
"object": "chat.completion",
"created": 1699999999,
"model": "gemini-2.5-flash",
"choices": [
{
"message": {
"role": "assistant",
"content": "Quantum mechanics is the branch of physics that studies the microscopic world..."
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 10,
"completion_tokens": 30,
"total_tokens": 40
}
}