Co-authored-by: crazywoola <427733928@qq.com>
@@ -138,6 +138,18 @@ model_credential_schema:
           show_on:
             - variable: __model_type
               value: llm
+        - label:
+            en_US: o3-mini
+          value: o3-mini
+          show_on:
+            - variable: __model_type
+              value: llm
+        - label:
+            en_US: o3-mini-2025-01-31
+          value: o3-mini-2025-01-31
+          show_on:
+            - variable: __model_type
+              value: llm
         - label:
             en_US: o1-preview
           value: o1-preview
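Each new option above carries a `show_on` condition keyed on the hidden `__model_type` variable, so the o3-mini entries only surface when the selected model type is `llm`. A minimal sketch of that filtering logic, assuming the options are loaded as plain dicts (an illustration, not the provider form code from this diff):

```python
def visible_options(options: list[dict], context: dict) -> list[dict]:
    """Keep only options whose show_on conditions all hold in the current form context."""
    def matches(option: dict) -> bool:
        return all(
            context.get(cond["variable"]) == cond["value"]
            for cond in option.get("show_on", [])
        )
    return [opt for opt in options if matches(opt)]


options = [
    {"label": {"en_US": "o3-mini"}, "value": "o3-mini",
     "show_on": [{"variable": "__model_type", "value": "llm"}]},
]
print(visible_options(options, {"__model_type": "llm"}))  # o3-mini is listed
print(visible_options(options, {"__model_type": "tts"}))  # filtered out -> []
```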
@@ -2,6 +2,8 @@
 - o1-2024-12-17
 - o1-mini
 - o1-mini-2024-09-12
+- o3-mini
+- o3-mini-2025-01-31
 - gpt-4
 - gpt-4o
 - gpt-4o-2024-05-13
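`_position.yaml` is just an ordered list of model names; inserting `o3-mini` and `o3-mini-2025-01-31` after the o1 family controls where they appear in the provider's model list. A rough sketch of ordering models by that list (the helper name is illustrative, not from this diff):

```python
import yaml  # PyYAML

def sort_by_position(position_yaml_path: str, model_names: list[str]) -> list[str]:
    """Order model names by their index in _position.yaml; unknown names sort last."""
    with open(position_yaml_path, encoding="utf-8") as f:
        order = yaml.safe_load(f)  # a plain list like the hunk above
    rank = {name: i for i, name in enumerate(order)}
    return sorted(model_names, key=lambda name: rank.get(name, len(order)))

# sort_by_position("_position.yaml", ["gpt-4", "o3-mini"]) -> ["o3-mini", "gpt-4"]
```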
@@ -0,0 +1,33 @@
+model: o3-mini-2025-01-31
+label:
+  zh_Hans: o3-mini-2025-01-31
+  en_US: o3-mini-2025-01-31
+model_type: llm
+features:
+  - agent-thought
+model_properties:
+  mode: chat
+  context_size: 200000
+parameter_rules:
+  - name: max_tokens
+    use_template: max_tokens
+    default: 100000
+    min: 1
+    max: 100000
+  - name: response_format
+    label:
+      zh_Hans: 回复格式
+      en_US: response_format
+    type: string
+    help:
+      zh_Hans: 指定模型必须输出的格式
+      en_US: specifying the format that the model must output
+    required: false
+    options:
+      - text
+      - json_object
+pricing:
+  input: '1.10'
+  output: '4.40'
+  unit: '0.000001'
+  currency: USD
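The pricing block expresses per-token prices as `input`/`output` multiplied by `unit`, so `1.10` with a unit of `0.000001` works out to $1.10 per million prompt tokens and $4.40 per million completion tokens. A quick sketch of that arithmetic (the helper name and token counts are illustrative, not part of this diff):

```python
from decimal import Decimal

def estimate_cost_usd(prompt_tokens: int, completion_tokens: int) -> Decimal:
    """Cost estimate from the pricing block above: price * unit gives USD per token."""
    unit = Decimal("0.000001")
    input_per_token = Decimal("1.10") * unit    # USD per prompt token
    output_per_token = Decimal("4.40") * unit   # USD per completion token
    return prompt_tokens * input_per_token + completion_tokens * output_per_token

print(estimate_cost_usd(10_000, 2_000))  # 0.0198 -> roughly two cents
```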
@@ -0,0 +1,33 @@
+model: o3-mini
+label:
+  zh_Hans: o3-mini
+  en_US: o3-mini
+model_type: llm
+features:
+  - agent-thought
+model_properties:
+  mode: chat
+  context_size: 200000
+parameter_rules:
+  - name: max_tokens
+    use_template: max_tokens
+    default: 100000
+    min: 1
+    max: 100000
+  - name: response_format
+    label:
+      zh_Hans: 回复格式
+      en_US: response_format
+    type: string
+    help:
+      zh_Hans: 指定模型必须输出的格式
+      en_US: specifying the format that the model must output
+    required: false
+    options:
+      - text
+      - json_object
+pricing:
+  input: '1.10'
+  output: '4.40'
+  unit: '0.000001'
+  currency: USD
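`o3-mini` and `o3-mini-2025-01-31` carry identical settings: the former is the rolling alias, the latter the dated snapshot it currently resolves to. The `max_tokens` rule (default 100000, min 1, max 100000) is the kind of bound a runtime would clamp requests against; a rough sketch, assuming the schema is read as a plain dict rather than through Dify's own rule classes:

```python
import yaml  # PyYAML

def clamp_max_tokens(model_yaml_path: str, requested: int) -> int:
    """Clamp a requested max_tokens value to the min/max declared in the model YAML."""
    with open(model_yaml_path, encoding="utf-8") as f:
        schema = yaml.safe_load(f)
    rule = next(r for r in schema["parameter_rules"] if r["name"] == "max_tokens")
    return max(rule["min"], min(requested, rule["max"]))

# clamp_max_tokens("o3-mini.yaml", 250_000) -> 100000
```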
@@ -1,5 +1,7 @@
 - openai/o1-preview
 - openai/o1-mini
+- openai/o3-mini
+- openai/o3-mini-2025-01-31
 - openai/gpt-4o
 - openai/gpt-4o-mini
 - openai/gpt-4
@@ -0,0 +1,49 @@
+model: openai/o3-mini-2025-01-31
+label:
+  en_US: o3-mini-2025-01-31
+model_type: llm
+features:
+  - agent-thought
+model_properties:
+  mode: chat
+  context_size: 200000
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    help:
+      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+      en_US: Only sample from the top K options for each subsequent token.
+    required: false
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 100000
+  - name: response_format
+    label:
+      zh_Hans: 回复格式
+      en_US: response_format
+    type: string
+    help:
+      zh_Hans: 指定模型必须输出的格式
+      en_US: specifying the format that the model must output
+    required: false
+    options:
+      - text
+      - json_object
+pricing:
+  input: "1.10"
+  output: "4.40"
+  unit: "0.000001"
+  currency: USD
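The `response_format` rule only exposes `text` and `json_object`; on an OpenAI-compatible endpoint such as OpenRouter's, `json_object` typically maps to `response_format={"type": "json_object"}` in the chat completions payload. A hedged sketch with the official `openai` client (the base URL, key placeholder, and prompt are assumptions, not part of this diff):

```python
from openai import OpenAI  # OpenRouter exposes an OpenAI-compatible API

client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key="YOUR_OPENROUTER_API_KEY",  # placeholder
)

resp = client.chat.completions.create(
    model="openai/o3-mini-2025-01-31",
    messages=[{"role": "user", "content": "Reply with a JSON object {\"ok\": true}."}],
    response_format={"type": "json_object"},  # the json_object option declared above
    max_tokens=512,  # mirrors the rule's default
)
print(resp.choices[0].message.content)
```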
@@ -0,0 +1,49 @@
+model: openai/o3-mini
+label:
+  en_US: o3-mini
+model_type: llm
+features:
+  - agent-thought
+model_properties:
+  mode: chat
+  context_size: 200000
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: top_k
+    label:
+      zh_Hans: 取样数量
+      en_US: Top k
+    type: int
+    help:
+      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+      en_US: Only sample from the top K options for each subsequent token.
+    required: false
+  - name: presence_penalty
+    use_template: presence_penalty
+  - name: frequency_penalty
+    use_template: frequency_penalty
+  - name: max_tokens
+    use_template: max_tokens
+    default: 512
+    min: 1
+    max: 100000
+  - name: response_format
+    label:
+      zh_Hans: 回复格式
+      en_US: response_format
+    type: string
+    help:
+      zh_Hans: 指定模型必须输出的格式
+      en_US: specifying the format that the model must output
+    required: false
+    options:
+      - text
+      - json_object
+pricing:
+  input: "1.10"
+  output: "4.40"
+  unit: "0.000001"
+  currency: USD