Integration with LangChain

This article guides you through integrating your product with Orq.ai and LangChain using our Python and Node.js SDKs. By the end, you'll know how to set up a Deployment in Orq.ai, perform prompt engineering, retrieve a Deployment's configuration with the SDK, and pass it to LangChain to generate an output.

Step 1: Install the SDK

# orq.ai sdk
pip install orq-ai-sdk

# langchain
pip install langchain
pip install langchain-openai
// orq.ai sdk
npm install @orq-ai/node --save

// langchain
// use node >= 18.0
npm install langchain
npm install @langchain/core
npm install @langchain/openai

Step 2: Get the Deployment configuration

You can find your Orq.ai API key in your workspace settings at https://my.orq.ai/<workspace-name>/settings/developers.
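The examples below hard-code the key for brevity; in practice you can read it from an environment variable instead. A minimal sketch; the ORQ_API_KEY variable name is an assumption, use whatever name you export in your shell:

import os

# Assumed variable name; export ORQ_API_KEY before running
api_key = os.environ["ORQ_API_KEY"]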

from orq_ai_sdk import OrqAI

api_key = "ORQ_AI_API_KEY"

client = OrqAI(
    api_key=api_key,
    environment="production",
)

from langchain_openai import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.chains import LLMChain
from langchain.prompts import ChatPromptTemplate

OPENAI_API_KEY = "YOUR_OPENAI_API_KEY"

# Fetch the Deployment configuration; `context` selects the matching
# variant and `inputs` fills the prompt variables
deployment_config = client.deployments.get_config(
    key="<deployment-name-with-langchain>",
    context={"locale": ["en"]},
    inputs={"country": "Netherlands"},
    metadata={"custom-field-name": "custom-metadata-value"},
)

config = deployment_config.to_dict()

# Map the Deployment messages to LangChain message types
chat_messages = []

for message in config.get("messages"):
    if message.get("role") == "system":
        chat_messages.append(SystemMessage(content=message.get("content")))
    elif message.get("role") == "user":
        chat_messages.append(HumanMessage(content=message.get("content")))
    elif message.get("role") == "assistant":
        chat_messages.append(AIMessage(content=message.get("content")))

# Initialize the chat model with the model from the Deployment
llm = ChatOpenAI(
    openai_api_key=OPENAI_API_KEY,
    model_name=config.get("model"),
)

# Create a prompt template from the chat messages and combine it
# with the model into a simple chain
template = ChatPromptTemplate.from_messages(chat_messages)

chain = LLMChain(llm=llm, prompt=template)

response = chain.invoke({})
print(response["text"])
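If you are on a newer LangChain release where LLMChain is deprecated, the same chain can be composed with the pipe operator, mirroring the Node.js example below. A minimal sketch, assuming langchain >= 0.1:

# `template | llm` builds a RunnableSequence; invoking it returns an AIMessage
chain = template | llm

response = chain.invoke({})
print(response.content)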
import { createClient } from '@orq-ai/node';
import { ChatOpenAI } from '@langchain/openai';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import {
  AIMessage,
  HumanMessage,
  SystemMessage,
} from '@langchain/core/messages';

const client = createClient({
  apiKey: 'ORQ_API_KEY',
  environment: 'production',
});

// Get Orq Deployment Config
const deploymentConfig = await client.deployments.getConfig({
   key: "deployment-with-langchain",
   context: {
      environments: ["production"],
      locale: ["en"]
   },
   metadata: {
      "custom-field-name": "custom-metadata-value"
   }
});

const openaiApiKey = 'YOUR_OPENAI_API_KEY';

// Prepare chat messages for LangChain
const chatMessages = deploymentConfig.messages.map(
	(message) => {
		switch (message.role) {
			case 'system':
				return new SystemMessage({ content: message.content });
			case 'user':
				return new HumanMessage({ content: message.content });
			case 'assistant':
				return new AIMessage({ content: message.content });
			default:
				throw new Error(`Invalid message role: ${message.role}`);
		}
	}
);

// Initialize LangChain ChatOpenAI
const llm = new ChatOpenAI({
	openAIApiKey: openaiApiKey,
	modelName: deploymentConfig.model,
});

// Create a ChatPromptTemplate from chat messages
const template = ChatPromptTemplate.fromMessages(chatMessages);


// We can now combine these into a simple LLM chain:
const chain = template.pipe(llm);

// We can now invoke the chain; the Deployment messages already
// contain the rendered inputs, so no extra variables are needed
const response = await chain.invoke({});
console.log(response.content);

Step 3: Add additional metrics to the request

After invoking the chain, you can enrich the logged transaction with additional metrics such as user feedback, token usage, and performance timings.

deployment_config.add_metrics(
  chain_id="c4a75b53-62fa-401b-8e97-493f3d299316",
  conversation_id="ee7b0c8c-eeb2-43cf-83e9-a4a49f8f13ea",
  user_id="e3a202a6-461b-447c-abe2-018ba4d04cd0",
  feedback={"score": 100},
  metadata={
      "custom": "custom_metadata",
      "chain_id": "ad1231xsdaABw",
  },
  usage={
      "prompt_tokens": 100,
      "completion_tokens": 900,
      "total_tokens": 1000,
  },
  performance={
      "latency": 9000,
      "time_to_first_token": 250,
  }
)
deploymentConfig.addMetrics({
  chain_id: "c4a75b53-62fa-401b-8e97-493f3d299316",
  conversation_id: "ee7b0c8c-eeb2-43cf-83e9-a4a49f8f13ea",
  user_id: "e3a202a6-461b-447c-abe2-018ba4d04cd0",
  feedback: {
    score: 100
  },
  metadata: {
    custom: "custom_metadata",
    chain_id: "ad1231xsdaABw"
  },
  usage: {
    prompt_tokens: 100,
    completion_tokens: 900,
    total_tokens: 1000
  },
  performance: {
    latency: 9000,
    time_to_first_token: 250
  }
})
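For example, you can time the chain call yourself and report the result. A minimal sketch in Python, assuming the latency field is expressed in milliseconds and that the other add_metrics fields may be omitted:

import time

start = time.time()
response = chain.invoke({})

# Assumes `latency` is in milliseconds and the other fields are optional
deployment_config.add_metrics(
    performance={
        "latency": int((time.time() - start) * 1000),
    }
)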