
feat: add proxy configuration for Cohere model (#4152)

tags/0.6.7
Moonlit 1 year ago
commit 2fdd64c1b5

api/core/model_runtime/model_providers/cohere/cohere.yaml (+18 -0)

        zh_Hans: 在此输入您的 API Key
        en_US: Enter your API Key
      show_on: [ ]
+   - variable: base_url
+     label:
+       zh_Hans: API Base
+       en_US: API Base
+     type: text-input
+     required: false
+     placeholder:
+       zh_Hans: 在此输入您的 API Base,如 https://api.cohere.ai/v1
+       en_US: Enter your API Base, e.g. https://api.cohere.ai/v1
  model_credential_schema:
    model:
      label:
      placeholder:
        zh_Hans: 在此输入您的 API Key
        en_US: Enter your API Key
+   - variable: base_url
+     label:
+       zh_Hans: API Base
+       en_US: API Base
+     type: text-input
+     required: false
+     placeholder:
+       zh_Hans: 在此输入您的 API Base,如 https://api.cohere.ai/v1
+       en_US: Enter your API Base, e.g. https://api.cohere.ai/v1
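The new optional base_url credential lets the provider config point at a proxy or gateway instead of Cohere's default endpoint. A minimal sketch of the credentials dict this form might produce once it reaches the runtime (the proxy URL is a placeholder, and the dict shape is assumed from the schema above):

    # Hypothetical credentials produced by the form above; the proxy URL
    # is a placeholder, not a real endpoint.
    credentials = {
        'api_key': 'YOUR_COHERE_API_KEY',
        'base_url': 'https://cohere-proxy.example.com/v1',  # optional; omit to use the default API
    }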

api/core/model_runtime/model_providers/cohere/llm/llm.py (+5 -4)

        :return: full response or stream response chunk generator result
        """
        # initialize client
-       client = cohere.Client(credentials.get('api_key'))
+       client = cohere.Client(credentials.get('api_key'), base_url=credentials.get('base_url'))

        if stop:
            model_parameters['end_sequences'] = stop

        return response

-   def _handle_generate_stream_response(self, model: str, credentials: dict, response: Iterator[GenerateStreamedResponse],
+   def _handle_generate_stream_response(self, model: str, credentials: dict,
+                                        response: Iterator[GenerateStreamedResponse],
                                         prompt_messages: list[PromptMessage]) -> Generator:
        """
        Handle llm stream response
        :return: full response or stream response chunk generator result
        """
        # initialize client
-       client = cohere.Client(credentials.get('api_key'))
+       client = cohere.Client(credentials.get('api_key'), base_url=credentials.get('base_url'))

        if stop:
            model_parameters['stop_sequences'] = stop

        :return: number of tokens
        """
        # initialize client
-       client = cohere.Client(credentials.get('api_key'))
+       client = cohere.Client(credentials.get('api_key'), base_url=credentials.get('base_url'))

        response = client.tokenize(
            text=text,
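The same two-argument constructor now appears in every method of this file. One way to keep the calls consistent would be a small helper; this is a hypothetical refactor, not part of the patch, and it assumes the SDK treats base_url=None as "use the default endpoint" — if it does not, the kwarg would need to be dropped when no base URL is configured:

    import cohere

    def _build_client(credentials: dict) -> cohere.Client:
        # Hypothetical helper: centralizes client construction so the
        # optional base_url (proxy) is honored everywhere. Assumes the SDK
        # falls back to its default endpoint when base_url is None.
        return cohere.Client(
            credentials.get('api_key'),
            base_url=credentials.get('base_url'),
        )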

api/core/model_runtime/model_providers/cohere/rerank/rerank.py (+1 -1)

        )

        # initialize client
-       client = cohere.Client(credentials.get('api_key'))
+       client = cohere.Client(credentials.get('api_key'), base_url=credentials.get('base_url'))
        response = client.rerank(
            query=query,
            documents=docs,
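With this change, a rerank call routed through a proxy might look like the sketch below; the proxy URL, model name, and documents are illustrative only — the base_url kwarg is the point:

    import cohere

    client = cohere.Client('YOUR_COHERE_API_KEY',
                           base_url='https://cohere-proxy.example.com/v1')
    response = client.rerank(
        query='What is the capital of France?',
        documents=['Paris is the capital of France.', 'Berlin is in Germany.'],
        model='rerank-english-v2.0',
        top_n=1,
    )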

api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py (+2 -2)

            return []

        # initialize client
-       client = cohere.Client(credentials.get('api_key'))
+       client = cohere.Client(credentials.get('api_key'), base_url=credentials.get('base_url'))

        response = client.tokenize(
            text=text,

        :return: embeddings and used tokens
        """
        # initialize client
-       client = cohere.Client(credentials.get('api_key'))
+       client = cohere.Client(credentials.get('api_key'), base_url=credentials.get('base_url'))

        # call embedding model
        response = client.embed(
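The embedding path gets the same treatment. A hedged usage sketch, assuming the proxy forwards the embed and tokenize routes unchanged (URL and model name are placeholders):

    import cohere

    client = cohere.Client('YOUR_COHERE_API_KEY',
                           base_url='https://cohere-proxy.example.com/v1')
    # Placeholder model name; any Cohere embedding model would do.
    response = client.embed(
        texts=['hello world'],
        model='embed-english-v3.0',
        input_type='search_document',
    )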
