
get keep alive from env (#9039)

### What problem does this PR solve?

Read the Ollama `keep_alive` value from the `OLLAMA_KEEP_ALIVE` environment variable (with an optional `ollama_keep_alive` kwarg override) instead of hard-coding `-1`.
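
For reference, the resolution order this PR introduces can be sketched as below. This is a minimal standalone sketch: the `ollama_keep_alive` kwarg and `OLLAMA_KEEP_ALIVE` variable names come from the diff, while the helper function itself is hypothetical.

```python
import os

def resolve_keep_alive(**kwargs) -> int:
    # Mirrors the constructor logic in the diff: an explicit "ollama_keep_alive"
    # kwarg wins, otherwise fall back to the OLLAMA_KEEP_ALIVE environment
    # variable, otherwise keep the previous hard-coded default of -1.
    return kwargs.get("ollama_keep_alive", int(os.environ.get("OLLAMA_KEEP_ALIVE", -1)))

print(resolve_keep_alive())                      # -1 when nothing is configured
print(resolve_keep_alive(ollama_keep_alive=300)) # kwarg overrides everything
```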

### Type of change

- [x] Refactoring
tags/v0.20.0
Stephen Hu, 3 months ago
commit 53b0b0e583
2 changed files with 6 additions and 4 deletions
  1. rag/llm/cv_model.py (+3, -2)
  2. rag/llm/embedding_model.py (+3, -2)

rag/llm/cv_model.py (+3, -2)

@@ -469,6 +469,7 @@ class OllamaCV(Base):
self.client = Client(host=kwargs["base_url"])
self.model_name = model_name
self.lang = lang
+ self.keep_alive = kwargs.get("ollama_keep_alive", int(os.environ.get("OLLAMA_KEEP_ALIVE", -1)))

def describe(self, image):
prompt = self.prompt("")
@@ -517,7 +518,7 @@ class OllamaCV(Base):
model=self.model_name,
messages=history,
options=options,
- keep_alive=-1,
+ keep_alive=self.keep_alive,
)

ans = response["message"]["content"].strip()
@@ -548,7 +549,7 @@ class OllamaCV(Base):
messages=history,
stream=True,
options=options,
- keep_alive=-1,
+ keep_alive=self.keep_alive,
)
for resp in response:
if resp["done"]:
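
Note: in Ollama, `keep_alive` controls how long the model stays loaded after a request, and a negative value keeps it loaded indefinitely; since `-1` remains the fallback default above, behavior is unchanged when neither the kwarg nor `OLLAMA_KEEP_ALIVE` is set.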

rag/llm/embedding_model.py (+3, -2)

@@ -285,6 +285,7 @@ class OllamaEmbed(Base):
def __init__(self, key, model_name, **kwargs):
self.client = Client(host=kwargs["base_url"]) if not key or key == "x" else Client(host=kwargs["base_url"], headers={"Authorization": f"Bearer {key}"})
self.model_name = model_name
+ self.keep_alive = kwargs.get("ollama_keep_alive", int(os.environ.get("OLLAMA_KEEP_ALIVE", -1)))

def encode(self, texts: list):
arr = []
@@ -293,7 +294,7 @@ class OllamaEmbed(Base):
# remove special tokens if they exist
for token in OllamaEmbed._special_tokens:
txt = txt.replace(token, "")
- res = self.client.embeddings(prompt=txt, model=self.model_name, options={"use_mmap": True}, keep_alive=-1)
+ res = self.client.embeddings(prompt=txt, model=self.model_name, options={"use_mmap": True}, keep_alive=self.keep_alive)
try:
arr.append(res["embedding"])
except Exception as _e:
@@ -305,7 +306,7 @@ class OllamaEmbed(Base):
# remove special tokens if they exist
for token in OllamaEmbed._special_tokens:
text = text.replace(token, "")
- res = self.client.embeddings(prompt=text, model=self.model_name, options={"use_mmap": True}, keep_alive=-1)
+ res = self.client.embeddings(prompt=text, model=self.model_name, options={"use_mmap": True}, keep_alive=self.keep_alive)
try:
return np.array(res["embedding"]), 128
except Exception as _e:
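
A usage sketch under the new behavior follows. The base URL, model name, and the value `300` are placeholders rather than anything from the PR; the only requirement implied by the diff is that `OLLAMA_KEEP_ALIVE` parses as an integer, since the constructor wraps it in `int(...)`.

```python
import os

from rag.llm.embedding_model import OllamaEmbed

# Placeholder value; must be an integer string because of the int(...) cast.
os.environ["OLLAMA_KEEP_ALIVE"] = "300"

# key="x" selects the unauthenticated client path shown in the diff;
# base_url and model_name are placeholders for a local Ollama instance.
embd = OllamaEmbed(key="x", model_name="nomic-embed-text", base_url="http://localhost:11434")

# A per-instance kwarg still overrides the environment variable:
pinned = OllamaEmbed(key="x", model_name="nomic-embed-text",
                     base_url="http://localhost:11434", ollama_keep_alive=-1)
```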
