LangChain

This article guides you through integrating your SaaS with orq.ai and LangChain. By the end of the article, you'll know how to set up a Deployment in orq.ai, perform prompt engineering, request a Deployment variant using our SDK code generator, and send it to LangChain to generate an output. Code examples are provided for both the Python and Node.js SDKs.

Step 1: Install the SDK

For Python:

# orquesta sdk
pip install orquesta-sdk

# langchain
pip install langchain

For Node.js (with npm or yarn):

// orquesta sdk
npm install @orquesta/node --save
yarn add @orquesta/node

// langchain
npm install langchain
yarn add langchain

Step 2: Get the Deployment configuration

You can find your orq.ai API key in your workspace at https://my.orquesta.dev/<workspace-name>/settings/developers.
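
The snippets below hardcode the API key for readability. In practice you would typically load it from an environment variable instead; a minimal sketch in Python, assuming the key is exported as ORQUESTA_API_KEY:

import os

# Read the orq.ai API key from the environment instead of hardcoding it
api_key = os.environ["ORQUESTA_API_KEY"]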

from orquesta_sdk import OrquestaClientOptions, Orquesta

api_key = "ORQUESTA_API_KEY"

# Initialize Orquesta client
options = OrquestaClientOptions(
    api_key=api_key,
    environment="production",
)

client = Orquesta(options)

from langchain_openai import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.chains import LLMChain
from langchain.prompts import ChatPromptTemplate

OPENAI_API_KEY = "YOUR_OPENAI_API_KEY"

# Fetch the deployment configuration, resolving context, inputs, and metadata
deployment_config = client.deployments.get_config(
    key="Deployment-with-Langchain",
    context={"locale": ["en"]},
    inputs={"country": "Netherlands"},
    metadata={"custom-field-name": "custom-metadata-value"},
)

config = deployment_config.to_dict()


# Map the deployment's prompt messages to LangChain message types
chat_messages = []

for message in config.get("messages"):
    if message.get("role") == "system":
        chat_messages.append(SystemMessage(content=message.get("content")))
    elif message.get("role") == "user":
        chat_messages.append(HumanMessage(content=message.get("content")))
    elif message.get("role") == "assistant":
        chat_messages.append(AIMessage(content=message.get("content")))

# Initialize the LangChain chat model with the model from the deployment config
llm = ChatOpenAI(
    openai_api_key=OPENAI_API_KEY,
    model_name=config.get("model"),
)

# Build the prompt template and chain, then run it
template = ChatPromptTemplate.from_messages(chat_messages)

chain = LLMChain(llm=llm, prompt=template)

chain.invoke({})
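
chain.invoke returns the model's completion rather than printing it. A minimal sketch of capturing and printing the result, assuming LLMChain's default "text" output key:

# Run the chain and capture the output.
# LLMChain returns a dict; the completion is stored under the
# default output key "text" unless you configured a different one.
result = chain.invoke({})
print(result["text"])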

The same integration using the orq.ai Node.js SDK:

import { createClient } from '@orquesta/node';
import { ChatOpenAI } from '@langchain/openai';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import {
  AIMessage,
  HumanMessage,
  SystemMessage,
} from '@langchain/core/messages';

const start = async () => {

  // Initialize Orquesta client
  const client = createClient({
    apiKey: 'ORQUESTA_API_KEY',
    environment: 'production',
  });

  // Getting the deployment config
  const deploymentConfig = await client.deployments.getConfig({
    key: 'Deployment-with-Langchain',
    context: {
      environments: ['test', 'production'],
      locale: ['en'],
    },
    inputs: {
      country: 'Netherlands',
    },
    metadata: {
      'custom-field-name': 'custom-metadata-value',
    },
  });

  console.log(deploymentConfig);

  const deploymentConfigObj: any = deploymentConfig;

  // OpenAI API key used by LangChain's ChatOpenAI client
  const openaiApiKey: string = 'OPENAI_API_KEY';

  // Prepare chat messages for LangChain
  const chatMessages: any[] = deploymentConfigObj.messages.map(
    (message: any) => {
      switch (message.role) {
        case 'system':
          return new SystemMessage({ content: message.content });
        case 'user':
          return new HumanMessage({ content: message.content });
        case 'assistant':
          return new AIMessage({ content: message.content });
        default:
          throw new Error(`Invalid message role: ${message.role}`);
      }
    }
  );

  // Initialize LangChain ChatOpenAI
  const llm = new ChatOpenAI({
    openAIApiKey: openaiApiKey,
    modelName: deploymentConfigObj.model,
  });

  // Create a ChatPromptTemplate from chat messages
  const template = ChatPromptTemplate.fromMessages(chatMessages);

  // Combine the prompt template and the model into a simple chain
  const chain = template.pipe(llm);

  // Invoke the chain; the deployment messages contain no template
  // variables, so no inputs need to be passed
  const response = await chain.invoke({});
  console.log(response.content);
};

// Call the async function
start();

Add additional metrics to the request

Once the chain has run, you can attach feedback, usage, and performance data to the logged request:

deployment_config.add_metrics(
  chain_id="c4a75b53-62fa-401b-8e97-493f3d299316",
  conversation_id="ee7b0c8c-eeb2-43cf-83e9-a4a49f8f13ea",
  user_id="e3a202a6-461b-447c-abe2-018ba4d04cd0",
  feedback={"score": 100},
  metadata={
      "custom": "custom_metadata",
      "chain_id": "ad1231xsdaABw",
  },
  usage={
      "prompt_tokens": 100,
      "completion_tokens": 900,
      "total_tokens": 1000,
  },
  performance={
      "latency": 9000,
      "time_to_first_token": 250,
  }
)

The same call with the Node.js SDK, using the deploymentConfig object returned by getConfig:

deploymentConfig.addMetrics({
  chain_id: "c4a75b53-62fa-401b-8e97-493f3d299316",
  conversation_id: "ee7b0c8c-eeb2-43cf-83e9-a4a49f8f13ea",
  user_id: "e3a202a6-461b-447c-abe2-018ba4d04cd0",
  feedback: {
    score: 100
  },
  metadata: {
    custom: "custom_metadata",
    chain_id: "ad1231xsdaABw"
  },
  usage: {
    prompt_tokens: 100,
    completion_tokens: 900,
    total_tokens: 1000
  },
  performance: {
    latency: 9000,
    time_to_first_token: 250
  }
})
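
To report real values instead of the placeholders above, you can time the chain invocation yourself and read token counts from LangChain's OpenAI callback. A minimal Python sketch, assuming latency is reported in milliseconds and that get_openai_callback is available in your LangChain version; adjust the fields to your setup:

import time

from langchain.callbacks import get_openai_callback

start_time = time.time()

# Track OpenAI token usage while the chain runs
with get_openai_callback() as cb:
    chain.invoke({})

latency_ms = int((time.time() - start_time) * 1000)

deployment_config.add_metrics(
    usage={
        "prompt_tokens": cb.prompt_tokens,
        "completion_tokens": cb.completion_tokens,
        "total_tokens": cb.total_tokens,
    },
    performance={"latency": latency_ms},  # assumed to be in milliseconds
)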