     },
     'model_config': {
         'provider': 'openai',
-        'model_id': 'text-davinci-003',
+        'model_id': 'gpt-3.5-turbo-instruct',
         'configs': {
             'prompt_template': '',
             'prompt_variables': [],
         },
         'model': json.dumps({
             "provider": "openai",
-            "name": "text-davinci-003",
+            "name": "gpt-3.5-turbo-instruct",
             "completion_params": {
                 "max_tokens": 512,
                 "temperature": 1,

     'mode': 'completion',
     'model_config': AppModelConfig(
         provider='openai',
-        model_id='text-davinci-003',
+        model_id='gpt-3.5-turbo-instruct',
         configs={
             'prompt_template': "Please translate the following text into {{target_language}}:\n",
             'prompt_variables': [

         pre_prompt="Please translate the following text into {{target_language}}:\n",
         model=json.dumps({
             "provider": "openai",
-            "name": "text-davinci-003",
+            "name": "gpt-3.5-turbo-instruct",
             "completion_params": {
                 "max_tokens": 1000,
                 "temperature": 0,

     'mode': 'completion',
     'model_config': AppModelConfig(
         provider='openai',
-        model_id='text-davinci-003',
+        model_id='gpt-3.5-turbo-instruct',
         configs={
             'prompt_template': "请将以下文本翻译为{{target_language}}:\n",
             'prompt_variables': [

         pre_prompt="请将以下文本翻译为{{target_language}}:\n",
         model=json.dumps({
             "provider": "openai",
-            "name": "text-davinci-003",
+            "name": "gpt-3.5-turbo-instruct",
             "completion_params": {
                 "max_tokens": 1000,
                 "temperature": 0,
 from models.provider import ProviderType, ProviderQuotaType

 COMPLETION_MODELS = [
+    'gpt-3.5-turbo-instruct',  # 4,096 tokens
     'text-davinci-003',  # 4,097 tokens
 ]

     'gpt-4': 8192,
     'gpt-4-32k': 32768,
     'gpt-3.5-turbo': 4096,
+    'gpt-3.5-turbo-instruct': 4096,
     'gpt-3.5-turbo-16k': 16384,
     'text-davinci-003': 4097,
 }
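The `COMPLETION_MODELS` list is what separates completion-style models from chat models, and the token map caps how much context each model accepts. A minimal sketch of how the two structures might be consulted together; the helper and constant names below are assumptions, only the model data mirrors the diff:

COMPLETION_MODELS = [
    'gpt-3.5-turbo-instruct',  # 4,096 tokens
    'text-davinci-003',        # 4,097 tokens
]

MODEL_MAX_CONTEXT = {
    'gpt-3.5-turbo': 4096,
    'gpt-3.5-turbo-instruct': 4096,
    'text-davinci-003': 4097,
}

def resolve_mode(model_name: str) -> str:
    # Completion-style models go to the completions endpoint; everything else is chat.
    return 'completion' if model_name in COMPLETION_MODELS else 'chat'

def clamp_max_tokens(model_name: str, requested: int) -> int:
    # Never request more tokens than the model's context window allows.
    return min(requested, MODEL_MAX_CONTEXT.get(model_name, 4096))

assert resolve_mode('gpt-3.5-turbo-instruct') == 'completion'
assert resolve_mode('gpt-3.5-turbo') == 'chat'
assert clamp_max_tokens('gpt-3.5-turbo-instruct', 10_000) == 4096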
             ModelFeature.AGENT_THOUGHT.value
         ]
     },
+    {
+        'id': 'gpt-3.5-turbo-instruct',
+        'name': 'GPT-3.5-Turbo-Instruct',
+    },
     {
         'id': 'gpt-3.5-turbo-16k',
         'name': 'gpt-3.5-turbo-16k',

     'gpt-4': 8192,
     'gpt-4-32k': 32768,
     'gpt-3.5-turbo': 4096,
+    'gpt-3.5-turbo-instruct': 4096,
     'gpt-3.5-turbo-16k': 16384,
     'text-davinci-003': 4097,
 }
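The provider's catalogue gains a display entry (`id` plus a human-readable `name`) alongside the repeated context-size map. A small hypothetical join of the two, purely to illustrate how a model list with limits could be produced; none of these names are taken from the codebase:

MODELS = [
    {'id': 'gpt-3.5-turbo-instruct', 'name': 'GPT-3.5-Turbo-Instruct'},
    {'id': 'gpt-3.5-turbo-16k', 'name': 'gpt-3.5-turbo-16k'},
]

CONTEXT_SIZES = {
    'gpt-3.5-turbo-instruct': 4096,
    'gpt-3.5-turbo-16k': 16384,
}

def models_with_limits():
    # Attach each model's context size to its display entry.
    return [{**m, 'max_tokens': CONTEXT_SIZES.get(m['id'])} for m in MODELS]

for entry in models_with_limits():
    print(entry['id'], entry['max_tokens'])  # gpt-3.5-turbo-instruct 4096, ...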
| "unit": "0.001", | "unit": "0.001", | ||||
| "currency": "USD" | "currency": "USD" | ||||
| }, | }, | ||||
| "gpt-3.5-turbo-instruct": { | |||||
| "prompt": "0.0015", | |||||
| "completion": "0.002", | |||||
| "unit": "0.001", | |||||
| "currency": "USD" | |||||
| }, | |||||
| "gpt-3.5-turbo-16k": { | "gpt-3.5-turbo-16k": { | ||||
| "prompt": "0.003", | "prompt": "0.003", | ||||
| "completion": "0.004", | "completion": "0.004", |
     max_retries: int = 1
     """Maximum number of retries to make when generating."""

+    def __new__(cls, **data: Any):  # type: ignore
+        return super(EnhanceOpenAI, cls).__new__(cls)
+
     @root_validator()
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key and python package exists in environment."""
 gunicorn~=21.2.0
 gevent~=22.10.2
 langchain==0.0.250
-openai~=0.27.8
+openai~=0.28.0
 psycopg2-binary~=2.9.6
 pycryptodome==3.17
 python-dotenv==1.0.0
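With the SDK pinned to the 0.28 line, `gpt-3.5-turbo-instruct` is reached through the legacy completions interface rather than chat completions. A minimal sketch of such a call (the API key is a placeholder):

import openai  # openai~=0.28.0, the legacy pre-1.0 SDK surface

openai.api_key = "sk-..."  # placeholder, not a real key

# gpt-3.5-turbo-instruct is a completion model, so it goes through the
# completions endpoint rather than chat completions.
response = openai.Completion.create(
    model="gpt-3.5-turbo-instruct",
    prompt="Please translate the following text into French:\nGood morning.",
    max_tokens=512,
    temperature=1,
)
print(response["choices"][0]["text"])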
 @patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
 def test_get_num_tokens(mock_decrypt):
-    openai_model = get_mock_openai_model('text-davinci-003')
+    openai_model = get_mock_openai_model('gpt-3.5-turbo-instruct')
     rst = openai_model.get_num_tokens([PromptMessage(content='you are a kindness Assistant.')])
     assert rst == 6

 def test_run(mock_decrypt, mocker):
     mocker.patch('core.model_providers.providers.base.BaseModelProvider.update_last_used', return_value=None)
-    openai_model = get_mock_openai_model('text-davinci-003')
+    openai_model = get_mock_openai_model('gpt-3.5-turbo-instruct')
     rst = openai_model.run(
         [PromptMessage(content='Human: Are you Human? you MUST only answer `y` or `n`? \nAssistant: ')],
         stop=['\nHuman:'],
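The updated tests keep the expected token count at 6 for the same prompt string. One way to sanity-check that number outside the mock, assuming tiktoken resolves `gpt-3.5-turbo-instruct` (via its `gpt-3.5-turbo-` prefix) to the `cl100k_base` encoding; tiktoken itself does not appear in this diff:

import tiktoken

# Assumption: tiktoken maps 'gpt-3.5-turbo-instruct' to the cl100k_base encoding.
enc = tiktoken.encoding_for_model("gpt-3.5-turbo-instruct")
tokens = enc.encode("you are a kindness Assistant.")
print(len(tokens))  # the test above asserts 6 for this prompt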