
feat: add chatgpt-4o-latest (#7289)

tags/0.7.1
非法操作 committed 1 year ago
commit 5aa373dc04

api/core/model_runtime/model_providers/openai/llm/_position.yaml (+1, -0)

  - gpt-4o
  - gpt-4o-2024-05-13
  - gpt-4o-2024-08-06
+ - chatgpt-4o-latest
  - gpt-4o-mini
  - gpt-4o-mini-2024-07-18
  - gpt-4-turbo

api/core/model_runtime/model_providers/openai/llm/chatgpt-4o-latest.yaml (+44, -0, new file)

model: chatgpt-4o-latest
label:
  zh_Hans: chatgpt-4o-latest
  en_US: chatgpt-4o-latest
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
  - vision
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 16384
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: response_format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '2.50'
  output: '10.00'
  unit: '0.000001'
  currency: USD
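
The parameter_rules block is what gives the new model its tunable knobs and bounds (for example, max_tokens defaults to 512 and is capped at 16384). As a rough illustration of how such a schema can be consumed, the sketch below loads the YAML with PyYAML and clamps user-supplied parameters against the declared default/min/max; the helper name and file path are assumptions for the example, not Dify's actual model-runtime API.

# Illustrative only: resolve completion parameters against the default/min/max
# bounds declared in a provider YAML like the one above. Not part of this commit.
import yaml

def apply_parameter_rules(schema_path: str, params: dict) -> dict:
    with open(schema_path, encoding="utf-8") as f:
        schema = yaml.safe_load(f)

    resolved = dict(params)
    for rule in schema.get("parameter_rules", []):
        name = rule["name"]
        if name not in resolved and "default" in rule:
            resolved[name] = rule["default"]  # fall back to the declared default
        if name in resolved:
            if "min" in rule:
                resolved[name] = max(rule["min"], resolved[name])
            if "max" in rule:
                resolved[name] = min(rule["max"], resolved[name])
    return resolved

# Example: a requested max_tokens of 50000 is clamped to the declared cap of 16384.
print(apply_parameter_rules("chatgpt-4o-latest.yaml", {"max_tokens": 50000}))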

api/core/model_runtime/model_providers/openai/llm/llm.py (+6, -3)

                                   tools: Optional[list[PromptMessageTool]] = None) -> int:
         """Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.
 
-        Official documentation: https://github.com/openai/openai-cookbook/blob/
-        main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
+        Official documentation: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
         if model.startswith('ft:'):
             model = model.split(':')[1]
 
+        # Currently, we can use gpt4o to calculate chatgpt-4o-latest's token.
+        if model == "chatgpt-4o-latest":
+            model = "gpt-4o"
+
         try:
             encoding = tiktoken.encoding_for_model(model)
         except KeyError:
             raise NotImplementedError(
                 f"get_num_tokens_from_messages() is not presently implemented "
                 f"for model {model}."
-                "See https://github.com/openai/openai-python/blob/main/chatml.md for "
+                "See https://platform.openai.com/docs/advanced-usage/managing-tokens for "
                 "information on how messages are converted to tokens."
             )
         num_tokens = 0
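
The new branch maps chatgpt-4o-latest onto gpt-4o before asking tiktoken for an encoding, because tiktoken.encoding_for_model() raises KeyError for model names it does not know, and (depending on the installed tiktoken version) the chatgpt-4o-latest alias may not be in its table. Since the alias is a rolling pointer to a GPT-4o snapshot, counting with the gpt-4o (o200k_base) encoding is an accurate stand-in. A minimal standalone sketch of the same fallback, separate from the method shown in the diff above:

# Standalone illustration of the fallback: if tiktoken does not know a model
# name, use the encoding of the base model the alias points to.
# The ALIAS_TO_BASE table is an assumption for this example only.
import tiktoken

ALIAS_TO_BASE = {"chatgpt-4o-latest": "gpt-4o"}

def encoding_for(model: str) -> tiktoken.Encoding:
    try:
        return tiktoken.encoding_for_model(model)
    except KeyError:
        base = ALIAS_TO_BASE.get(model)
        if base is None:
            raise
        return tiktoken.encoding_for_model(base)

enc = encoding_for("chatgpt-4o-latest")
print(len(enc.encode("Hello, world!")))  # token count under gpt-4o's o200k_base encoding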

web/app/components/header/account-setting/model-provider-page/model-icon/index.tsx (+1, -1)

 }) => {
   const language = useLanguage()
 
-  if (provider?.provider === 'openai' && modelName?.startsWith('gpt-4'))
+  if (provider?.provider === 'openai' && (modelName?.startsWith('gpt-4') || modelName?.includes('4o')))
     return <OpenaiViolet className={`w-4 h-4 ${className}`}/>
 
   if (provider?.icon_small) {
