
Fix: Wrong cutoff length led to empty input in OpenAI-compatible embedding models. (#7133)

tags/0.7.0
Yanyi Liu, 1 year ago
commit 4cbeb6815b

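Root cause: when `num_tokens` exceeds `context_size`, the ratio `context_size / num_tokens` is below 1, so flooring the ratio first collapses the cutoff to 0 and an empty string is sent to the embedding endpoint. The fix multiplies before flooring, so a proportional prefix of the text is kept. A minimal sketch of the two formulas, using hypothetical lengths rather than real tokenizer output:

```python
import numpy as np

# Hypothetical numbers: a 6000-character text whose GPT-2 token count
# (1500) exceeds the embedding model's context size (512).
text = "x" * 6000
num_tokens = 1500
context_size = 512

# Old formula: the ratio is floored first; any ratio below 1 becomes 0.
old_cutoff = int(len(text) * (np.floor(context_size / num_tokens)))
print(old_cutoff, repr(text[0:old_cutoff]))   # 0 '' -> empty input sent to the API

# Fixed formula: multiply first, floor last, keeping a proportional prefix.
new_cutoff = int(np.floor(len(text) * (context_size / num_tokens)))
print(new_cutoff, len(text[0:new_cutoff]))    # 2048 2048 -> prefix roughly within the context limit
```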
api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py (+1, -1)

@@ -72,7 +72,7 @@ class OllamaEmbeddingModel(TextEmbeddingModel):
     num_tokens = self._get_num_tokens_by_gpt2(text)

     if num_tokens >= context_size:
-        cutoff = int(len(text) * (np.floor(context_size / num_tokens)))
+        cutoff = int(np.floor(len(text) * (context_size / num_tokens)))
         # if num tokens is larger than context length, only use the start
         inputs.append(text[0: cutoff])
     else:

api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py (+1, -1)

@@ -76,7 +76,7 @@ class OAICompatEmbeddingModel(_CommonOAI_API_Compat, TextEmbeddingModel):
     num_tokens = self._get_num_tokens_by_gpt2(text)

     if num_tokens >= context_size:
-        cutoff = int(len(text) * (np.floor(context_size / num_tokens)))
+        cutoff = int(np.floor(len(text) * (context_size / num_tokens)))
         # if num tokens is larger than context length, only use the start
         inputs.append(text[0: cutoff])
     else:

api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py (+1, -1)

@@ -79,7 +79,7 @@ class OAICompatEmbeddingModel(_CommonOAI_API_Compat, TextEmbeddingModel):
     num_tokens = self._get_num_tokens_by_gpt2(text)

     if num_tokens >= context_size:
-        cutoff = int(len(text) * (np.floor(context_size / num_tokens)))
+        cutoff = int(np.floor(len(text) * (context_size / num_tokens)))
         # if num tokens is larger than context length, only use the start
         inputs.append(text[0: cutoff])
     else:
