Browse Source

feat: add openai o1 & update pricing and max_token of other models (#11780)

Signed-off-by: -LAN- <laipz8200@outlook.com>
tags/0.14.1
-LAN- 10 months ago
parent
commit
a5db7c9acb
No account linked to committer's email address

+ 7
- 5
api/core/model_runtime/model_providers/openai/llm/_position.yaml View File

@@ -1,4 +1,7 @@
- gpt-4o-audio-preview
- o1
- o1-2024-12-17
- o1-mini
- o1-mini-2024-09-12
- gpt-4
- gpt-4o
- gpt-4o-2024-05-13
@@ -7,10 +10,6 @@
- chatgpt-4o-latest
- gpt-4o-mini
- gpt-4o-mini-2024-07-18
- o1-preview
- o1-preview-2024-09-12
- o1-mini
- o1-mini-2024-09-12
- gpt-4-turbo
- gpt-4-turbo-2024-04-09
- gpt-4-turbo-preview
@@ -25,4 +24,7 @@
- gpt-3.5-turbo-1106
- gpt-3.5-turbo-0613
- gpt-3.5-turbo-instruct
- gpt-4o-audio-preview
- o1-preview
- o1-preview-2024-09-12
- text-davinci-003

+ 1
- 1
api/core/model_runtime/model_providers/openai/llm/chatgpt-4o-latest.yaml View File

@@ -22,7 +22,7 @@ parameter_rules:
use_template: frequency_penalty
- name: max_tokens
use_template: max_tokens
default: 512
default: 16384
min: 1
max: 16384
- name: response_format

+ 2
- 2
api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-05-13.yaml View File

@@ -22,9 +22,9 @@ parameter_rules:
use_template: frequency_penalty
- name: max_tokens
use_template: max_tokens
default: 512
default: 16384
min: 1
max: 4096
max: 16384
- name: response_format
label:
zh_Hans: 回复格式

+ 1
- 1
api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-08-06.yaml View File

@@ -22,7 +22,7 @@ parameter_rules:
use_template: frequency_penalty
- name: max_tokens
use_template: max_tokens
default: 512
default: 16384
min: 1
max: 16384
- name: response_format

+ 1
- 1
api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-11-20.yaml View File

@@ -22,7 +22,7 @@ parameter_rules:
use_template: frequency_penalty
- name: max_tokens
use_template: max_tokens
default: 512
default: 16384
min: 1
max: 16384
- name: response_format

+ 2
- 2
api/core/model_runtime/model_providers/openai/llm/gpt-4o-audio-preview.yaml View File

@@ -22,9 +22,9 @@ parameter_rules:
use_template: frequency_penalty
- name: max_tokens
use_template: max_tokens
default: 512
default: 16384
min: 1
max: 4096
max: 16384
- name: response_format
label:
zh_Hans: 回复格式

+ 1
- 1
api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini-2024-07-18.yaml View File

@@ -22,7 +22,7 @@ parameter_rules:
use_template: frequency_penalty
- name: max_tokens
use_template: max_tokens
default: 512
default: 16384
min: 1
max: 16384
- name: response_format

+ 1
- 1
api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml View File

@@ -22,7 +22,7 @@ parameter_rules:
use_template: frequency_penalty
- name: max_tokens
use_template: max_tokens
default: 512
default: 16384
min: 1
max: 16384
- name: response_format

+ 4
- 4
api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml View File

@@ -22,9 +22,9 @@ parameter_rules:
use_template: frequency_penalty
- name: max_tokens
use_template: max_tokens
default: 512
default: 16384
min: 1
max: 4096
max: 16384
- name: response_format
label:
zh_Hans: 回复格式
@@ -38,7 +38,7 @@ parameter_rules:
- text
- json_object
pricing:
input: '5.00'
output: '15.00'
input: '2.50'
output: '10.00'
unit: '0.000001'
currency: USD

+ 35
- 0
api/core/model_runtime/model_providers/openai/llm/o1-2024-12-17.yaml View File

@@ -0,0 +1,35 @@
# OpenAI o1 (2024-12-17 dated snapshot) model definition.
model: o1-2024-12-17
label:
  # zh_Hans added for consistency with o1.yaml, which declares both locales.
  zh_Hans: o1-2024-12-17
  en_US: o1-2024-12-17
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
  - vision
model_properties:
  mode: chat
  context_size: 200000
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    default: 50000
    min: 1
    # NOTE(review): OpenAI's model reference lists 100000 max output tokens
    # for o1 — confirm before raising this cap.
    max: 50000
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: response_format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
# Prices are USD per token (unit 0.000001 => per million tokens).
pricing:
  input: '15.00'
  output: '60.00'
  unit: '0.000001'
  currency: USD

+ 36
- 0
api/core/model_runtime/model_providers/openai/llm/o1.yaml View File

@@ -0,0 +1,36 @@
# OpenAI o1 (rolling alias) model definition; mirrors the dated
# o1-2024-12-17 snapshot definition.
model: o1
label:
  zh_Hans: o1
  en_US: o1
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
  - vision
model_properties:
  mode: chat
  context_size: 200000
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    default: 50000
    min: 1
    # NOTE(review): OpenAI's model reference lists 100000 max output tokens
    # for o1 — confirm before raising this cap.
    max: 50000
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: response_format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
# Prices are USD per token (unit 0.000001 => per million tokens).
pricing:
  input: '15.00'
  output: '60.00'
  unit: '0.000001'
  currency: USD

Loading…
Cancel
Save