import { ChatOpenAI } from "@langchain/openai"

const chatModel = new ChatOpenAI({ openAIApiKey: "..." })

const response = await chatModel.invoke('Translate "Hello World" into German.')
console.log(response.content)
// imports and code as above...
import { ChatPromptTemplate } from "@langchain/core/prompts"

const prompt = ChatPromptTemplate.fromMessages([
  ["system", "You are a world class translator."],
  ["user", "{input}"],
])

const llmChain = prompt.pipe(chatModel)

const response = await llmChain.invoke({
  input: 'Translate "Hello World" into German.',
})
console.log(response)

// Outputs:
// AIMessage {
//   lc_serializable: true,
//   lc_kwargs: {
//     content: '"Hello World" in German is "Hallo Welt".',
//     additional_kwargs: { function_call: undefined, tool_calls: undefined }
//   },
//   lc_namespace: [ 'langchain_core', 'messages' ],
//   content: '"Hello World" in German is "Hallo Welt".',
//   name: undefined,
//   additional_kwargs: { function_call: undefined, tool_calls: undefined }
// }
pipe creates a chain of commands - a concept that is central to LangChain - in which the output of one step becomes the input of the next.
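As a minimal sketch of that idea (separate from the translation example, and using RunnableLambda plus two made-up functions purely for illustration), any two runnables can be composed with pipe:

import { RunnableLambda } from "@langchain/core/runnables"

// Two tiny runnables wrapping plain functions (illustrative only)
const toUpper = RunnableLambda.from((text) => text.toUpperCase())
const exclaim = RunnableLambda.from((text) => `${text}!`)

// pipe chains them: the output of toUpper becomes the input of exclaim
const shout = toUpper.pipe(exclaim)

console.log(await shout.invoke("hallo welt"))
// Outputs: HALLO WELT!

The prompt, model, and parser used in this article are composed in exactly the same way.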
LangChain provides a convenient parser for isolating the model's output as a plain string:
// imports and code as above...
import { StringOutputParser } from "@langchain/core/output_parsers"

const outputParser = new StringOutputParser()

const llmChain = prompt.pipe(chatModel).pipe(outputParser)

const response = await llmChain.invoke({
  input: 'Translate "Hello World" into German.',
})
console.log(response)

// Outputs: "Hello World" in German is "Hallo Welt."