Creating chat sessions in knowledge models
Below is an example script to chat with the model that creates a new chat session for the specified model and sends messages to it.
Define the constants:
python
import requests
KM_ID = '{YOUR_KM_ID}'
API_URL = 'https://training.constructor.app/api/platform-kmapi/v1'
API_KEY = '{YOUR_API_KEY}'
Define the headers:
python
# Define the headers
headers = {
'X-KM-AccessKey': f'Bearer {API_KEY}'
}
Select an LLM from the available options to use for generating responses:
python
print(requests.get(f'{API_URL}/language_models', headers=headers).json())
Example:
json
{
"results": [
{
"id": "9d38cf09f0ff4fe8ab7b2e714adb55b2",
"name": "GPT-4o Mini",
"description": "The most cost-efficient small model",
"hosted_by": {"name": "OpenAI"},
"code": "gpt"
},
{
"id": "83db90a3c67648e0ba43b48c2f2c39b6",
"name": "GPT-4o",
"description": "Most advanced and efficient model, excelling in non-English languages",
"hosted_by": {"name": "OpenAI"},
"code": "gpt"
},
{
"id": "23fb715e7426484ba8d89a2e930d3e9f",
"name": "GPT-4 Turbo",
"description": "Offers improved instruction following, JSON mode, reproducible outputs, and parallel function calling, reducing incomplete tasks.",
"hosted_by": {"name": "OpenAI"},
"code": "gpt"
},
{
"id": "e4d51d8b3f2a4abca4d5f58b711053a6",
"name": "GPT-4",
"description": "Solves complex problems with high accuracy, leveraging broad knowledge and advanced reasoning.",
"hosted_by": {"name": "OpenAI"},
"code": "gpt"
},
{
"id": "643282cc66eb4875ab51213438a56a2a",
"name": "GPT-o1 Preview",
"description": "Beta version of the o1 model, which performs complex reasoning and think before you answer.",
"hosted_by": {"name": "OpenAI"},
"code": "gpt"
},
{
"id": "aaf4df69643e4c728aff9773c7039879",
"name": "GPT-o1 Mini",
"description": "Light version of the o1 model with twice the context window size and response speed.",
"hosted_by": {"name": "OpenAI"},
"code": "gpt"
},
{
"id": "fc5f80c1b7fb49ffb0b9a059d2cf6522",
"name": "Claude 3.5 Sonnet",
"description": "Best combination of performance and speed for efficient, high-throughput tasks.",
"hosted_by": {"name": "Anthropic"},
"code": "claude"
},
{
"id": "fa4138f0e3454efda84767c338a06f33",
"name": "Claude 3 Opus",
"description": "Highest-performing model, which can handle complex analysis, longer tasks with many steps, and higher-order math and coding tasks.",
"hosted_by": {"name": "Anthropic"},
"code": "claude"
},
{
"id": "13c40686125844f590567d0c99d879d7",
"name": "Gemini 1.5 Pro",
"description": "Complex reasoning tasks such as code and text generation, text editing, problem solving, data extraction and generation.",
"hosted_by": {"name": "Google"},
"code": "gemini"
},
{
"id": "5ae60bf998ee4bc8a1136078dc9ca367",
"name": "Gemini 1.5 Flash",
"description": "Fast and versatile performance across a diverse variety of tasks.",
"hosted_by": {"name": "Google"},
"code": "gemini"
},
{
"id": "41350d36e6944876bede32d82a9dad01",
"name": "Gemini 1.0 Pro",
"description": "Natural language tasks, multi-turn text and code chat, and code generation.",
"hosted_by": {"name": "Google"},
"code": "gemini"
}
],
"total": 11
}
Create a knowledge model if you don’t already have one. See Creating knowledge models.
Create a new chat session using 'GPT-4o Mini' as the chat engine and Knowledge model only for the Chat with setting:
python
def create_session(km_id):
data = {
"llm_id": "9d38cf09f0ff4fe8ab7b2e714adb55b2", # gpt-4o mini, change the id to any other available model if required
"mode": "model"
}
response = requests.post(f'{API_URL}/knowledge-models/{KM_ID}/chat-sessions', headers=headers, json=data)
return response.json()Send a message to the chat session:
python
def send_message(text, session_id, mode="model"):
headers = {'X-KM-AccessKey': f'Bearer {API_KEY}'}
data = {"text": text, "mode": mode}
response = requests.post(f'{API_URL}/knowledge-models/{KM_ID}/chat-sessions/{session_id}/messages', headers=headers, json=data)
return response.json()Get responses from the chat session:
python
import time
import logging
import requests
# Constants used when polling the chat session for the model's reply
AI_MESSAGE_TYPE = "ai_message"  # message["type"] for model-generated replies
PROCESSING_STATUS = "processing"  # the reply is still being generated
DONE_STATUS = "done"  # the reply is complete and can be returned
# Configure logging so the "Waiting for reply..." progress messages are shown
logging.basicConfig(level=logging.INFO)
def ask(text, session_id, mode="model", timeout=60, request_timeout=15, retry_delay=3):
response = send_message(text, session_id)
headers = {'X-KM-AccessKey': f'Bearer {API_KEY}'}
start_time = time.time()
while True:
response = requests.get(
f'{API_URL}/knowledge-models/{KM_ID}/chat-sessions/{session_id}/messages',
headers=headers,
timeout=request_timeout
)
response.raise_for_status()
message = response.json()["results"][0]
if message["type"] != AI_MESSAGE_TYPE:
break
status_name = message["status"]["name"]
if status_name == PROCESSING_STATUS:
logging.info("Waiting for reply...")
time.sleep(retry_delay)
elif status_name == DONE_STATUS:
return message
if time.time() - start_time > timeout:
logging.warning("Operation timed out.")
break
return NoneExecute the entire flow:
python
new_model = create_model("ML in Materials Science", "Overview of the current trends in Materials Science", "all")  # create_model is defined in "Creating knowledge models"
KM_ID = new_model["id"]  # knowledge model ID used by the helpers above
session = create_session(KM_ID) # Create a new session
session_id = session["id"] # Get session ID from the response
text = "Hello" # Define the text you want to send
answer = ask(text, session_id) # Send message and retrieve the reply
print(answer) # Print the reply
json
{
"id": "b60f77c5132149fbb859e87f44ef2b0f",
"type": "ai_message",
"created_at": "2024-12-18T08:41:18.058713Z",
"included_documents": [],
"mode": "model",
"status": {
"name": "done",
"message": "produce_final_answer"
},
"like_status": "none",
"quick_replies": [],
"content": {
"type": "text",
"text": "I don't know.",
"references": [],
"documents": [],
"links": []
},
"llm_id": "9d38cf09f0ff4fe8ab7b2e714adb55b2"
}