### What problem does this PR solve? Adding a Bedrock API key for Claude Sonnet was broken. I found the issue while trying to test the LLM configuration: the `system` field is a required parameter in boto3. In addition, there were problems in the Bedrock embeddings implementation when encoding queries. ### Type of change - [X] Bug Fix (non-breaking change which fixes an issue)

tags/v0.13.0
| modelId=self.model_name, | modelId=self.model_name, | ||||
| messages=history, | messages=history, | ||||
| inferenceConfig=gen_conf, | inferenceConfig=gen_conf, | ||||
| system=[{"text": system}] if system else None, | |||||
| system=[{"text": (system if system else "Answer the user's message.")}] , | |||||
| ) | ) | ||||
| # Extract and print the response text. | # Extract and print the response text. | ||||
| streaming_response = self.client.converse_stream( | streaming_response = self.client.converse_stream( | ||||
| modelId=self.model_name, | modelId=self.model_name, | ||||
| messages=history, | messages=history, | ||||
| inferenceConfig=gen_conf | |||||
| inferenceConfig=gen_conf, | |||||
| system=[{"text": system if system else ""}], | |||||
| ) | ) | ||||
| # Extract and print the streamed response text in real-time. | # Extract and print the streamed response text in real-time. |
| response = self.client.invoke_model(modelId=self.model_name, body=json.dumps(body)) | response = self.client.invoke_model(modelId=self.model_name, body=json.dumps(body)) | ||||
| model_response = json.loads(response["body"].read()) | model_response = json.loads(response["body"].read()) | ||||
| embeddings.extend([model_response["embedding"]]) | |||||
| embeddings.extend(model_response["embedding"]) | |||||
| return np.array(embeddings), token_count | return np.array(embeddings), token_count | ||||