{ console.log("🤖 Chatbot에 오신 걸 환영합니다! (종료하려면 'exit' 입력)"); while (true) { const input = readline.question("\n🙂 당신: "); if (input.toLowerCase() === "exit") { console.log("👋 챗봇을 종료합니다."); break; } const response = await chat.call([ new HumanMessage(input) ]); console.log("\n🤖 챗봇:", response.content); } }; startChat(); "> { console.log("🤖 Chatbot에 오신 걸 환영합니다! (종료하려면 'exit' 입력)"); while (true) { const input = readline.question("\n🙂 당신: "); if (input.toLowerCase() === "exit") { console.log("👋 챗봇을 종료합니다."); break; } const response = await chat.call([ new HumanMessage(input) ]); console.log("\n🤖 챗봇:", response.content); } }; startChat(); "> { console.log("🤖 Chatbot에 오신 걸 환영합니다! (종료하려면 'exit' 입력)"); while (true) { const input = readline.question("\n🙂 당신: "); if (input.toLowerCase() === "exit") { console.log("👋 챗봇을 종료합니다."); break; } const response = await chat.call([ new HumanMessage(input) ]); console.log("\n🤖 챗봇:", response.content); } }; startChat(); ">
// Example 1: a basic single-turn chatbot (no conversation memory)
import { ChatOpenAI } from "langchain/chat_models/openai";
import { HumanMessage } from "langchain/schema";
import readline from "readline-sync";
import dotenv from "dotenv";

dotenv.config();

// Configure the chat model
const chat = new ChatOpenAI({
  openAIApiKey: process.env.OPENAI_API_KEY,
  temperature: 0.7,
  modelName: "gpt-3.5-turbo"
});

const startChat = async () => {
  console.log("🤖 Welcome to the chatbot! (type 'exit' to quit)");
  while (true) {
    const input = readline.question("\n🙂 You: ");
    if (input.toLowerCase() === "exit") {
      console.log("👋 Shutting down the chatbot.");
      break;
    }
    // Only the current message is sent, so the bot forgets earlier turns
    const response = await chat.call([
      new HumanMessage(input)
    ]);
    console.log("\n🤖 Bot:", response.content);
  }
};

startChat();
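Because chat.call takes an array of messages, you can prepend a SystemMessage (also exported from langchain/schema) to pin down the bot's persona before the user's turn. A minimal sketch; the persona text is purely illustrative:

import { SystemMessage } from "langchain/schema";

// Illustrative persona prompt -- adjust to taste
const persona = new SystemMessage("You are a friendly assistant that answers concisely.");

const response = await chat.call([
  persona,
  new HumanMessage(input)
]);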
// Example 2: a chatbot that remembers the conversation via BufferMemory
import { ChatOpenAI } from "langchain/chat_models/openai";
import { ConversationChain } from "langchain/chains";
import { BufferMemory } from "langchain/memory";
import readline from "readline-sync";
import dotenv from "dotenv";

dotenv.config();

// 1. Configure the LLM
const chat = new ChatOpenAI({
  openAIApiKey: process.env.OPENAI_API_KEY,
  temperature: 0.7,
  modelName: "gpt-3.5-turbo"
});

// 2. Create the memory and the chain
// BufferMemory keeps the full chat history and injects it into every prompt
const memory = new BufferMemory();
const chain = new ConversationChain({
  llm: chat,
  memory: memory
});
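// (Optional) ConversationChain also accepts a custom prompt. The template must
// expose {history} and {input}, the keys BufferMemory and the chain use by
// default. A sketch, with a purely illustrative persona:
//
// import { PromptTemplate } from "langchain/prompts";
// const prompt = PromptTemplate.fromTemplate(
//   "You are a concise assistant.\n\nCurrent conversation:\n{history}\nHuman: {input}\nAI:"
// );
// const chain = new ConversationChain({ llm: chat, memory, prompt });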
const startChat = async () => {
  console.log("🤖 Memory-enabled chatbot started! (type 'exit' to quit)");
  while (true) {
    const input = readline.question("\n🙂 You: ");
    if (input.toLowerCase() === "exit") {
      console.log("👋 Goodbye!");
      break;
    }
    // The chain automatically prepends the stored history to each request
    const res = await chain.call({ input });
    console.log("\n🤖 Bot:", res.response);
  }
};

startChat();
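To check what the chain is actually remembering, BufferMemory exposes loadMemoryVariables; a small debugging sketch you could drop inside the loop:

// Dump the accumulated history (stored under the "history" key by default)
const vars = await memory.loadMemoryVariables({});
console.log(vars.history);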
"PDF 문서를 읽고, 사용자의 질문에 그 내용을 바탕으로 대답하는 챗봇”
// Example 3: RAG over a PDF with an in-memory vector store
// Note: top-level await requires an ES module ("type": "module" in package.json)
import * as dotenv from "dotenv";
dotenv.config();

import { ChatOpenAI } from "langchain/chat_models/openai";
import { PDFLoader } from "@langchain/community/document_loaders/fs/pdf";
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { RetrievalQAChain } from "langchain/chains";
import readline from "readline-sync";

// 1. Load the PDF document
const loader = new PDFLoader("document.pdf");
const rawDocs = await loader.load();

// 2. Split the document into overlapping chunks
const splitter = new RecursiveCharacterTextSplitter({
  chunkSize: 500,   // characters per chunk
  chunkOverlap: 50  // overlap preserves context across chunk boundaries
});
const docs = await splitter.splitDocuments(rawDocs);
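// (Optional) Sanity-check the split before paying for embeddings -- a debugging sketch:
// console.log(`Split into ${docs.length} chunks`);
// console.log(docs[0].pageContent.slice(0, 100));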
// 3. Embed the chunks and store them in an in-memory vector DB
const vectorStore = await MemoryVectorStore.fromDocuments(
  docs,
  new OpenAIEmbeddings({
    openAIApiKey: process.env.OPENAI_API_KEY
  })
);
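// (Optional) Query the store directly to verify the embeddings work -- a
// debugging sketch, not part of the chatbot flow:
// const hits = await vectorStore.similaritySearch("sample query", 2);
// console.log(hits.map((d) => d.pageContent));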
// 4. Build the QA chain (retrieve relevant chunks → answer with GPT)
const model = new ChatOpenAI({
  openAIApiKey: process.env.OPENAI_API_KEY,
  modelName: "gpt-3.5-turbo",
  temperature: 0  // deterministic answers suit document QA
});
const chain = RetrievalQAChain.fromLLM(model, vectorStore.asRetriever());

// 5. Chat with the user
const startChat = async () => {
  console.log("📄 Document chatbot started! (type 'exit' to quit)");
  while (true) {
    const input = readline.question("\n🙂 Question: ");
    if (input.toLowerCase() === "exit") break;
    const res = await chain.call({ query: input });
    console.log("\n🤖 Answer:", res.text);
  }
};

startChat();
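If you want to see which chunks each answer was grounded in, RetrievalQAChain.fromLLM also accepts a returnSourceDocuments option; a minimal sketch:

const chainWithSources = RetrievalQAChain.fromLLM(
  model,
  vectorStore.asRetriever(),
  { returnSourceDocuments: true }
);

const res = await chainWithSources.call({ query: "What is this document about?" });
console.log(res.text);
// res.sourceDocuments holds the retrieved chunks and their metadata (e.g. page numbers)
res.sourceDocuments.forEach((d) => console.log(d.metadata));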
// Example 4: the same RAG chatbot, backed by a persistent Chroma vector DB
import * as dotenv from "dotenv";
dotenv.config();

import { ChatOpenAI } from "langchain/chat_models/openai";
import { PDFLoader } from "@langchain/community/document_loaders/fs/pdf";
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
import { Chroma } from "@langchain/community/vectorstores/chroma";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { RetrievalQAChain } from "langchain/chains";
import readline from "readline-sync";

// 1. Load the PDF document
const loader = new PDFLoader("document.pdf");
const rawDocs = await loader.load();

// 2. Split the document into chunks
const splitter = new RecursiveCharacterTextSplitter({
  chunkSize: 500,
  chunkOverlap: 50
});
const docs = await splitter.splitDocuments(rawDocs);

// 3. Store the embeddings in a Chroma vector DB.
// Unlike the Python library, the JS client has no persistDirectory option; it
// talks to a running Chroma server (e.g. `docker run -p 8000:8000 chromadb/chroma`),
// which handles persistence on its side.
const vectorStore = await Chroma.fromDocuments(
  docs,
  new OpenAIEmbeddings({
    openAIApiKey: process.env.OPENAI_API_KEY
  }),
  {
    collectionName: "my-docs",              // collection name inside Chroma
    collectionMetadata: { source: "docs" },
    url: "http://localhost:8000"            // default Chroma server address
  }
);

// 4. Connect the model
const model = new ChatOpenAI({
  openAIApiKey: process.env.OPENAI_API_KEY,
  modelName: "gpt-3.5-turbo",
  temperature: 0
});

// 5. QA chain
const chain = RetrievalQAChain.fromLLM(model, vectorStore.asRetriever());

// 6. Chat loop
const startChat = async () => {
  console.log("📄 Chroma-backed document chatbot started! (type 'exit' to quit)");
  while (true) {
    const input = readline.question("\n🙂 Question: ");
    if (input.toLowerCase() === "exit") break;
    const res = await chain.call({ query: input });
    console.log("\n🤖 Answer:", res.text);
  }
};

startChat();
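Because the collection lives on the Chroma server, later runs can reconnect to it instead of re-loading and re-embedding the PDF. Chroma.fromExistingCollection is the usual way to do that; a sketch assuming the same collectionName and server URL as above:

const existingStore = await Chroma.fromExistingCollection(
  new OpenAIEmbeddings({ openAIApiKey: process.env.OPENAI_API_KEY }),
  { collectionName: "my-docs", url: "http://localhost:8000" }
);
const chain2 = RetrievalQAChain.fromLLM(model, existingStore.asRetriever());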