Signed-off-by: -LAN- <laipz8200@outlook.com>
- deepseek-r1-distill-llama-70b
- llama-3.1-405b-reasoning
- llama-3.3-70b-versatile
- llama-3.1-70b-versatile

model: deepseek-r1-distill-llama-70b
label:
  en_US: DeepSeek R1 Distill Llama 70b
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 8192
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: Response Format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '3.00'
  output: '3.00'
  unit: '0.000001'
  currency: USD
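
For context, the pricing block expresses per-token prices scaled by unit: with unit '0.000001' and input '3.00', one input token costs 3.00 * 0.000001 USD, i.e. USD 3.00 per million tokens. The sketch below only illustrates that arithmetic; the function name and token counts are hypothetical and not part of this change.

from decimal import Decimal

# Pricing values copied from the YAML above.
PRICING = {
    "input": Decimal("3.00"),
    "output": Decimal("3.00"),
    "unit": Decimal("0.000001"),  # scale factor applied to the per-token price
    "currency": "USD",
}

def estimate_cost(prompt_tokens: int, completion_tokens: int) -> Decimal:
    """Illustrative helper: estimate request cost from the pricing block."""
    input_cost = Decimal(prompt_tokens) * PRICING["input"] * PRICING["unit"]
    output_cost = Decimal(completion_tokens) * PRICING["output"] * PRICING["unit"]
    return input_cost + output_cost

# Example: 1000 prompt tokens + 500 completion tokens
# -> (1000 + 500) * 3.00 * 0.000001 = 0.0045 USD
print(estimate_cost(1000, 500))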