Function calling
Enable models to fetch data and take actions.
Function calling provides a powerful and flexible way for OpenAI models to interface with your code or external services, and has two primary use cases:
Fetching Data | Retrieve up-to-date information to incorporate into the model's response (RAG). Useful for searching knowledge bases and retrieving specific data from APIs (e.g. current weather data). |
Taking Action | Perform actions like submitting a form, calling APIs, modifying application state (UI/frontend or backend), or taking agentic workflow actions (like handing off the conversation). |
Get weather
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
from openai import OpenAI
client = OpenAI()
tools = [{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get current temperature for a given location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City and country e.g. Bogotá, Colombia"
}
},
"required": [
"location"
],
"additionalProperties": False
},
"strict": True
}
}]
completion = client.chat.completions.create(
model="gpt-4o",
messages=[{"role": "user", "content": "What is the weather like in Paris today?"}],
tools=tools
)
print(completion.choices[0].message.tool_calls)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
import { OpenAI } from "openai";
const openai = new OpenAI();
const tools = [{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get current temperature for a given location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City and country e.g. Bogotá, Colombia"
}
},
"required": [
"location"
],
"additionalProperties": false
},
"strict": true
}
}];
const completion = await openai.chat.completions.create({
model: "gpt-4o",
messages: [{ role: "user", content: "What is the weather like in Paris today?" }],
tools,
});
console.log(completion.choices[0].message.tool_calls);
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
curl https://api.openai.com/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $OPENAI_API_KEY" \
-d '{
"model": "gpt-4o",
"messages": [
{
"role": "user",
"content": "What is the weather like in Paris today?"
}
],
"tools": [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get current temperature for a given location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City and country e.g. Bogotá, Colombia"
}
},
"required": [
"location"
],
"additionalProperties": false
},
"strict": true
}
}
]
}'
1
2
3
4
5
6
7
8
[{
"id": "call_12345xyz",
"type": "function",
"function": {
"name": "get_weather",
"arguments": "{\"location\":\"Paris, France\"}"
}
}]
Send email
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
from openai import OpenAI
client = OpenAI()
tools = [{
"type": "function",
"function": {
"name": "send_email",
"description": "Send an email to a given recipient with a subject and message.",
"parameters": {
"type": "object",
"properties": {
"to": {
"type": "string",
"description": "The recipient email address."
},
"subject": {
"type": "string",
"description": "Email subject line."
},
"body": {
"type": "string",
"description": "Body of the email message."
}
},
"required": [
"to",
"subject",
"body"
],
"additionalProperties": False
},
"strict": True
}
}]
completion = client.chat.completions.create(
model="gpt-4o",
messages=[{"role": "user", "content": "Can you send an email to ilan@example.com and katia@example.com saying hi?"}],
tools=tools
)
print(completion.choices[0].message.tool_calls)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
import { OpenAI } from "openai";
const openai = new OpenAI();
const tools = [{
"type": "function",
"function": {
"name": "send_email",
"description": "Send an email to a given recipient with a subject and message.",
"parameters": {
"type": "object",
"properties": {
"to": {
"type": "string",
"description": "The recipient email address."
},
"subject": {
"type": "string",
"description": "Email subject line."
},
"body": {
"type": "string",
"description": "Body of the email message."
}
},
"required": [
"to",
"subject",
"body"
],
"additionalProperties": false
},
"strict": true
}
}];
const completion = await openai.chat.completions.create({
model: "gpt-4o",
messages: [{ role: "user", content: "Can you send an email to ilan@example.com and katia@example.com saying hi?" }],
tools,
});
console.log(completion.choices[0].message.tool_calls);
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
curl https://api.openai.com/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $OPENAI_API_KEY" \
-d '{
"model": "gpt-4o",
"messages": [
{
"role": "user",
"content": "Can you send an email to ilan@example.com and katia@example.com saying hi?"
}
],
"tools": [
{
"type": "function",
"function": {
"name": "send_email",
"description": "Send an email to a given recipient with a subject and message.",
"parameters": {
"type": "object",
"properties": {
"to": {
"type": "string",
"description": "The recipient email address."
},
"subject": {
"type": "string",
"description": "Email subject line."
},
"body": {
"type": "string",
"description": "Body of the email message."
}
},
"required": [
"to",
"subject",
"body"
],
"additionalProperties": false
},
"strict": true
}
}
]
}'
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
[
{
"id": "call_9876abc",
"type": "function",
"function": {
"name": "send_email",
"arguments": "{\"to\":\"ilan@example.com\",\"subject\":\"Hello!\",\"body\":\"Just wanted to say hi\"}"
}
},
{
"id": "call_9877abc",
"type": "function",
"function": {
"name": "send_email",
"arguments": "{\"to\":\"katia@example.com\",\"subject\":\"Hello!\",\"body\":\"Just wanted to say hi\"}"
}
}
]
Search knowledge base
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
from openai import OpenAI
client = OpenAI()
tools = [{
"type": "function",
"function": {
"name": "search_knowledge_base",
"description": "Query a knowledge base to retrieve relevant info on a topic.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The user question or search query."
},
"options": {
"type": "object",
"properties": {
"num_results": {
"type": "number",
"description": "Number of top results to return."
},
"domain_filter": {
"type": [
"string",
"null"
],
"description": "Optional domain to narrow the search (e.g. 'finance', 'medical'). Pass null if not needed."
},
"sort_by": {
"type": [
"string",
"null"
],
"enum": [
"relevance",
"date",
"popularity",
"alphabetical"
],
"description": "How to sort results. Pass null if not needed."
}
},
"required": [
"num_results",
"domain_filter",
"sort_by"
],
"additionalProperties": False
}
},
"required": [
"query",
"options"
],
"additionalProperties": False
},
"strict": True
}
}]
completion = client.chat.completions.create(
model="gpt-4o",
messages=[{"role": "user", "content": "Can you find information about ChatGPT in the AI knowledge base?"}],
tools=tools
)
print(completion.choices[0].message.tool_calls)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
import { OpenAI } from "openai";
const openai = new OpenAI();
const tools = [{
"type": "function",
"function": {
"name": "search_knowledge_base",
"description": "Query a knowledge base to retrieve relevant info on a topic.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The user question or search query."
},
"options": {
"type": "object",
"properties": {
"num_results": {
"type": "number",
"description": "Number of top results to return."
},
"domain_filter": {
"type": [
"string",
"null"
],
"description": "Optional domain to narrow the search (e.g. 'finance', 'medical'). Pass null if not needed."
},
"sort_by": {
"type": [
"string",
"null"
],
"enum": [
"relevance",
"date",
"popularity",
"alphabetical"
],
"description": "How to sort results. Pass null if not needed."
}
},
"required": [
"num_results",
"domain_filter",
"sort_by"
],
"additionalProperties": false
}
},
"required": [
"query",
"options"
],
"additionalProperties": false
},
"strict": true
}
}];
const completion = await openai.chat.completions.create({
model: "gpt-4o",
messages: [{ role: "user", content: "Can you find information about ChatGPT in the AI knowledge base?" }],
tools,
});
console.log(completion.choices[0].message.tool_calls);
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
curl https://api.openai.com/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $OPENAI_API_KEY" \
-d '{
"model": "gpt-4o",
"messages": [
{
"role": "user",
"content": "Can you find information about ChatGPT in the AI knowledge base?"
}
],
"tools": [
{
"type": "function",
"function": {
"name": "search_knowledge_base",
"description": "Query a knowledge base to retrieve relevant info on a topic.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The user question or search query."
},
"options": {
"type": "object",
"properties": {
"num_results": {
"type": "number",
"description": "Number of top results to return."
},
"domain_filter": {
"type": [
"string",
"null"
],
"description": "Optional domain to narrow the search (e.g. 'finance', 'medical'). Pass null if not needed."
},
"sort_by": {
"type": [
"string",
"null"
],
"enum": [
"relevance",
"date",
"popularity",
"alphabetical"
],
"description": "How to sort results. Pass null if not needed."
}
},
"required": [
"num_results",
"domain_filter",
"sort_by"
],
"additionalProperties": false
}
},
"required": [
"query",
"options"
],
"additionalProperties": false
},
"strict": true
}
}
]
}'
1
2
3
4
5
6
7
8
[{
"id": "call_4567xyz",
"type": "function",
"function": {
"name": "search_knowledge_base",
"arguments": "{\"query\":\"What is ChatGPT?\",\"options\":{\"num_results\":3,\"domain_filter\":null,\"sort_by\":\"relevance\"}}"
}
}]
You can extend the capabilities of OpenAI models by giving them access to tools
, which can have one of two forms:
Function Calling | Developer-defined code. |
Hosted Tools | |
OpenAI-built tools. (e.g. file search, code interpreter) |
Only available in the Assistants API.
|
This guide will cover how you can give the model access to your own functions through function calling. Based on the system prompt and messages, the model may decide to call these functions — instead of (or in addition to) generating text or audio.
You'll then execute the function code, send back the results, and the model will incorporate them into its final response.
Let's look at the steps to allow a model to use a real get_weather
function defined below:
1
2
3
4
5
6
import requests

def get_weather(latitude, longitude):
    """Fetch the current temperature (Celsius) for the given coordinates via Open-Meteo."""
    # Bug fix: "&curren" had been HTML-entity-mangled into "¤" in the query string;
    # the Open-Meteo parameter is "current".
    response = requests.get(f"https://api.open-meteo.com/v1/forecast?latitude={latitude}&longitude={longitude}&current=temperature_2m,wind_speed_10m&hourly=temperature_2m,relative_humidity_2m,wind_speed_10m")
    data = response.json()
    return data['current']['temperature_2m']
1
2
3
4
5
// Fetch the current temperature (Celsius) for the given coordinates via Open-Meteo.
// Bug fix: "&curren" had been HTML-entity-mangled into "¤" in the query string;
// the Open-Meteo parameter is "current".
async function getWeather(latitude, longitude) {
  const response = await fetch(`https://api.open-meteo.com/v1/forecast?latitude=${latitude}&longitude=${longitude}&current=temperature_2m,wind_speed_10m&hourly=temperature_2m,relative_humidity_2m,wind_speed_10m`);
  const data = await response.json();
  return data.current.temperature_2m;
}
Unlike the diagram earlier, this function expects precise latitude
and longitude
instead of a general location
parameter. (However, our models can automatically determine the coordinates for many locations!)
- Call model with functions defined – along with your system and user messages.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
from openai import OpenAI
import json
client = OpenAI()
tools = [{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get current temperature for provided coordinates in celsius.",
"parameters": {
"type": "object",
"properties": {
"latitude": {"type": "number"},
"longitude": {"type": "number"}
},
"required": ["latitude", "longitude"],
"additionalProperties": False
},
"strict": True
}
}]
messages = [{"role": "user", "content": "What's the weather like in Paris today?"}]
completion = client.chat.completions.create(
model="gpt-4o",
messages=messages,
tools=tools,
)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
import { OpenAI } from "openai";
const openai = new OpenAI();
const tools = [{
type: "function",
function: {
name: "get_weather",
description: "Get current temperature for provided coordinates in celsius.",
parameters: {
type: "object",
properties: {
latitude: { type: "number" },
longitude: { type: "number" }
},
required: ["latitude", "longitude"],
additionalProperties: false
},
strict: true
}
}];
const messages = [
{
role: "user",
content: "What's the weather like in Paris today?"
}
];
const completion = await openai.chat.completions.create({
model: "gpt-4o",
messages,
tools
});
- Model decides to call function(s) – model returns the name and input arguments.
1
2
3
4
5
6
7
8
[{
"id": "call_12345xyz",
"type": "function",
"function": {
"name": "get_weather",
"arguments": "{\"latitude\":48.8566,\"longitude\":2.3522}"
}
}]
- Execute function code – parse the model's response and handle function calls.
1
2
3
4
# Extract the first tool call from the model's response and run the real function.
tool_call = completion.choices[0].message.tool_calls[0]
# Arguments arrive as a JSON-encoded string; decode before use.
args = json.loads(tool_call.function.arguments)
result = get_weather(args["latitude"], args["longitude"])
1
2
3
4
// Extract the first tool call from the model's response and run the real function.
const toolCall = completion.choices[0].message.tool_calls[0];
// Arguments arrive as a JSON-encoded string; decode before use.
const args = JSON.parse(toolCall.function.arguments);
// Bug fix: the function defined earlier is `getWeather` (camelCase), not `get_weather`.
const result = await getWeather(args.latitude, args.longitude);
- Supply model with results – so it can incorporate them into its final response.
1
2
3
4
5
6
7
8
9
10
11
12
messages.append(completion.choices[0].message)  # append model's function call message
messages.append({                               # append result message
    "role": "tool",
    "tool_call_id": tool_call.id,
    # Bug fix: tool message content must be a string; get_weather returns a number,
    # so stringify it (the JavaScript example already calls .toString()).
    "content": str(result)
})

# Second round-trip: the model now folds the tool result into its final answer.
completion_2 = client.chat.completions.create(
    model="gpt-4o",
    messages=messages,
    tools=tools,
)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
// Append the model's function-call message, then the stringified tool result,
// and call the model again so it can fold the result into its final answer.
messages.push(completion.choices[0].message); // append model's function call message
messages.push({ // append result message
role: "tool",
tool_call_id: toolCall.id,
content: result.toString()
});
const completion2 = await openai.chat.completions.create({
model: "gpt-4o",
messages,
tools
});
console.log(completion2.choices[0].message.content);
- Model responds – incorporating the result in its output.
"The current temperature in Paris is 14°C (57.2°F)."
Functions can be set in the tools
parameter of each API request inside a function
object.
A function is defined by its schema, which informs the model what it does and what input arguments it expects. It comprises the following fields:
Field | Description |
---|---|
name |
The function's name (e.g. get_weather ) |
description |
Details on when and how to use the function |
parameters |
JSON schema defining the function's input arguments |
Take a look at this example or generate your own below (or in our Playground).
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Retrieves current weather for the given location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City and country e.g. Bogotá, Colombia"
},
"units": {
"type": "string",
"enum": [
"celsius",
"fahrenheit"
],
"description": "Units the temperature will be returned in."
}
},
"required": [
"location",
"units"
],
"additionalProperties": false
},
"strict": true
}
}
Because the parameters
are defined by a JSON schema, you can leverage many of its rich features like property types, enums, descriptions, nested objects, and recursive objects.
(Optional) Function calling with pydantic and zod
While we encourage you to define your function schemas directly, our SDKs have helpers to convert pydantic
and zod
objects into schemas. Not all pydantic
and zod
features are supported.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
from openai import OpenAI, pydantic_function_tool
from pydantic import BaseModel, Field
client = OpenAI()
class GetWeather(BaseModel):
location: str = Field(
...,
description="City and country e.g. Bogotá, Colombia"
)
tools = [pydantic_function_tool(GetWeather)]
completion = client.chat.completions.create(
model="gpt-4o",
messages=[{"role": "user", "content": "What's the weather like in Paris today?"}],
tools=tools
)
print(completion.choices[0].message.tool_calls)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
import OpenAI from "openai";
import { z } from "zod";
import { zodFunction } from "openai/helpers/zod";
const openai = new OpenAI();
const GetWeatherParameters = z.object({
location: z.string().describe("City and country e.g. Bogotá, Colombia"),
});
const tools = [
zodFunction({ name: "getWeather", parameters: GetWeatherParameters }),
];
const messages = [
{ role: "user", content: "What's the weather like in Paris today?" },
];
const response = await openai.chat.completions.create({
model: "gpt-4o",
messages,
tools,
});
console.log(response.choices[0].message.tool_calls);
-
Write clear and detailed function names, parameter descriptions, and instructions.
- Explicitly describe the purpose of the function and each parameter (and its format), and what the output represents.
- Use the system prompt to describe when (and when not) to use each function. Generally, tell the model exactly what to do.
- Include examples and edge cases, especially to rectify any recurring failures. (Note: Adding examples may hurt performance for reasoning models.)
-
Apply software engineering best practices.
- Make the functions obvious and intuitive. (principle of least surprise)
-
Use enums and object structure to make invalid states unrepresentable. (e.g.
toggle_light(on: bool, off: bool)
allows for invalid calls) - Pass the intern test. Can an intern/human correctly use the function given nothing but what you gave the model? (If not, what questions do they ask you? Add the answers to the prompt.)
-
Offload the burden from the model and use code where possible.
-
Don't make the model fill arguments you already know. For example, if you already have an
order_id
based on a previous menu, don't have anorder_id
param – instead, have no paramssubmit_refund()
and pass theorder_id
with code. -
Combine functions that are always called in sequence. For example, if you always call
mark_location()
afterquery_location()
, just move the marking logic into the query function call.
-
Don't make the model fill arguments you already know. For example, if you already have an
-
Keep the number of functions small for higher accuracy.
- Evaluate your performance with different numbers of functions.
- Aim for fewer than 20 functions at any one time, though this is just a soft suggestion.
-
Leverage OpenAI resources.
- Generate and iterate on function schemas in the Playground.
- Consider fine-tuning to increase function calling accuracy for large numbers of functions or difficult tasks. (cookbook)
Under the hood, functions are injected into the system message in a syntax the model has been trained on. This means functions count against the model's context limit and are billed as input tokens. If you run into token limits, we suggest limiting the number of functions or the length of the descriptions you provide for function parameters.
It is also possible to use fine-tuning to reduce the number of tokens used if you have many functions defined in your tools specification.
When the model calls a function, you must execute it and return the result. Since model responses can include zero, one, or multiple calls, it is best practice to assume there are several.
The response has an array of tool_calls
, each with an id
(used later to submit the function result) and a function
containing a name
and JSON-encoded arguments
.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
[
{
"id": "call_12345xyz",
"type": "function",
"function": {
"name": "get_weather",
"arguments": "{\"location\":\"Paris, France\"}"
}
},
{
"id": "call_67890abc",
"type": "function",
"function": {
"name": "get_weather",
"arguments": "{\"location\":\"Bogotá, Colombia\"}"
}
},
{
"id": "call_99999def",
"type": "function",
"function": {
"name": "send_email",
"arguments": "{\"to\":\"bob@email.com\",\"body\":\"Hi bob\"}"
}
}
]
1
2
3
4
5
6
7
8
9
10
# Handle every tool call in the response (there may be zero, one, or many).
for tool_call in completion.choices[0].message.tool_calls:
    name = tool_call.function.name
    args = json.loads(tool_call.function.arguments)  # arguments are JSON-encoded
    result = call_function(name, args)
    messages.append({
        "role": "tool",
        "tool_call_id": tool_call.id,
        # Bug fix: tool message content must be a string; coerce non-string results
        # (the JavaScript twin already calls .toString()).
        "content": str(result)
    })
1
2
3
4
5
6
7
8
9
10
11
// Handle every tool call in the response (there may be zero, one, or many).
for (const toolCall of completion.choices[0].message.tool_calls) {
  const name = toolCall.function.name;
  const args = JSON.parse(toolCall.function.arguments); // arguments are JSON-encoded
  // Bug fix: callFunction is async — without `await`, result would be a Promise
  // and result.toString() would yield "[object Promise]".
  const result = await callFunction(name, args);
  messages.push({
    role: "tool",
    tool_call_id: toolCall.id,
    content: result.toString()
  });
}
In the example above, we have a hypothetical call_function
to route each call. Here’s a possible implementation:
1
2
3
4
5
def call_function(name, args):
    """Route a model tool call to the matching local implementation.

    Returns the function's result, or None for an unrecognized name.
    """
    handlers = {
        "get_weather": get_weather,
        "send_email": send_email,
    }
    handler = handlers.get(name)
    if handler is not None:
        return handler(**args)
1
2
3
4
5
6
7
8
// Route a model tool call to the matching local implementation.
// Resolves to the function's result, or undefined for an unrecognized name.
const callFunction = async (name, args) => {
  switch (name) {
    case "get_weather":
      return getWeather(args.latitude, args.longitude);
    case "send_email":
      return sendEmail(args.to, args.body);
  }
};
A result must be a string, but the format is up to you (JSON, error codes, plain text, etc.). The model will interpret that string as needed.
If your function has no return value (e.g. send_email
), simply return a string to indicate success or failure. (e.g. "success"
)
After appending the results to your messages
, you can send them back to the model to get a final response.
1
2
3
4
5
completion = client.chat.completions.create(
model="gpt-4o",
messages=messages,
tools=tools,
)
1
2
3
4
5
const completion = await openai.chat.completions.create({
model: "gpt-4o",
messages,
tools
});
"It's about 15°C in Paris, 18°C in Bogotá, and I've sent that email to Bob."
By default the model will determine when and how many tools to use. You can force specific behavior with the tool_choice
parameter.
-
Auto: (Default) Call zero, one, or multiple functions.
tool_choice: "auto"
-
Required: Call one or more functions.
tool_choice: "required"
-
Forced Function: Call exactly one specific function.
tool_choice: {"type": "function", "function": {"name": "get_weather"}}
You can also set tool_choice
to "none"
to imitate the behavior of passing no functions.
The model may choose to call multiple functions in a single turn. You can prevent this by setting parallel_tool_calls
to false
, which ensures exactly zero or one tool is called.
Note: Currently, if the model calls multiple functions in one turn then strict mode will be disabled for those calls.
Setting strict
to true
will ensure function calls reliably adhere to the function schema, instead of being best effort. We recommend always enabling strict mode.
Under the hood, strict mode works by leveraging our structured outputs feature and therefore introduces a couple requirements:
-
additionalProperties
must be set tofalse
for each object in theparameters
. - All fields in
properties
must be marked asrequired
.
You can denote optional fields by adding null
as a type
option (see example below).
Strict mode enabled
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Retrieves current weather for the given location.",
"strict": true,
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City and country e.g. Bogotá, Colombia"
},
"units": {
"type": ["string", "null"],
"enum": ["celsius", "fahrenheit"],
"description": "Units the temperature will be returned in."
}
},
"required": ["location", "units"],
"additionalProperties": false
}
}
}
Strict mode disabled
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Retrieves current weather for the given location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City and country e.g. Bogotá, Colombia"
},
"units": {
"type": "string",
"enum": ["celsius", "fahrenheit"],
"description": "Units the temperature will be returned in."
}
},
"required": ["location"],
}
}
}
All schemas generated in the playground have strict mode enabled.
While we recommend you enable strict mode, it has a few limitations:
- Some features of JSON schema are not supported. (See supported schemas.)
- Schemas undergo additional processing on the first request (and are then cached). If your schemas vary from request to request, this may result in higher latencies.
- Schemas are cached for performance, and are not eligible for zero data retention.
Streaming can be used to surface progress by showing which function is called as the model fills its arguments, and even displaying the arguments in real time.
Streaming function calls is very similar to streaming regular responses: you set stream
to true
and get chunks with delta
objects.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
from openai import OpenAI
client = OpenAI()
tools = [{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get current temperature for a given location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City and country e.g. Bogotá, Colombia"
}
},
"required": ["location"],
"additionalProperties": False
},
"strict": True
}
}]
stream = client.chat.completions.create(
model="gpt-4o",
messages=[{"role": "user", "content": "What's the weather like in Paris today?"}],
tools=tools,
stream=True
)
for chunk in stream:
delta = chunk.choices[0].delta
print(delta.tool_calls)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
import { OpenAI } from "openai";
const openai = new OpenAI();
const tools = [{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get current temperature for a given location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City and country e.g. Bogotá, Colombia"
}
},
"required": ["location"],
"additionalProperties": false
},
"strict": true
}
}];
const stream = await openai.chat.completions.create({
model: "gpt-4o",
messages: [{ role: "user", content: "What's the weather like in Paris today?" }],
tools,
stream: true
});
for await (const chunk of stream) {
const delta = chunk.choices[0].delta;
console.log(delta.tool_calls);
}
1
2
3
4
5
6
7
8
9
[{"index": 0, "id": "call_DdmO9pD3xa9XTPNJ32zg2hcA", "function": {"arguments": "", "name": "get_weather"}, "type": "function"}]
[{"index": 0, "id": null, "function": {"arguments": "{\"", "name": null}, "type": null}]
[{"index": 0, "id": null, "function": {"arguments": "location", "name": null}, "type": null}]
[{"index": 0, "id": null, "function": {"arguments": "\":\"", "name": null}, "type": null}]
[{"index": 0, "id": null, "function": {"arguments": "Paris", "name": null}, "type": null}]
[{"index": 0, "id": null, "function": {"arguments": ",", "name": null}, "type": null}]
[{"index": 0, "id": null, "function": {"arguments": " France", "name": null}, "type": null}]
[{"index": 0, "id": null, "function": {"arguments": "\"}", "name": null}, "type": null}]
null
Instead of aggregating chunks into a single content
string, however, you're aggregating chunks into an encoded arguments
JSON object.
When the model calls one or more functions the tool_calls
field of each delta
will be populated. Each tool_call
contains the following fields:
Field | Description |
---|---|
index |
Identifies which function call the delta is for |
id |
Tool call id. |
function |
Function call delta (name and arguments ) |
type |
Type of tool_call (always function for function calls) |
Many of these fields are only set for the first delta
of each tool call, like id
, function.name
, and type
.
Below is a code snippet demonstrating how to aggregate the delta
s into a final tool_calls
object.
1
2
3
4
5
6
7
8
9
10
# Aggregate streamed tool-call deltas into complete tool calls, keyed by index.
final_tool_calls = {}

for chunk in stream:
    for tool_call in chunk.choices[0].delta.tool_calls or []:
        index = tool_call.index

        if index not in final_tool_calls:
            # First delta for this call carries id/name/type; keep it as the base.
            final_tool_calls[index] = tool_call
        else:
            # Later deltas only stream additional argument fragments.
            # (Without this else, the first delta's arguments were appended onto
            # themselves — harmless only because the first fragment is empty.)
            final_tool_calls[index].function.arguments += tool_call.function.arguments
1
2
3
4
5
6
7
8
9
10
11
12
13
14
// Aggregate streamed tool-call deltas into complete tool calls, keyed by index.
const finalToolCalls = {};

for await (const chunk of stream) {
  const toolCalls = chunk.choices[0].delta.tool_calls || [];

  for (const toolCall of toolCalls) {
    const { index } = toolCall;

    if (!finalToolCalls[index]) {
      // First delta for this call carries id/name/type; keep it as the base.
      finalToolCalls[index] = toolCall;
    } else {
      // Later deltas only stream additional argument fragments.
      // (Without this else, the first delta's arguments were appended onto
      // themselves — harmless only because the first fragment is empty.)
      finalToolCalls[index].function.arguments += toolCall.function.arguments;
    }
  }
}
1
2
3
4
5
6
7
8
{
"index": 0,
"id": "call_RzfkBpJgzeR0S242qfvjadNe",
"function": {
"name": "get_weather",
"arguments": "{\"location\":\"Paris, France\"}"
}
}