diff --git a/07 ollama RAG/main.py b/07 ollama RAG/main.py
index 662e9af..e1cc7f7 100644
--- a/07 ollama RAG/main.py
+++ b/07 ollama RAG/main.py
@@ -1,15 +1,15 @@
-from langchain_community.embeddings import OllamaEmbeddings
-from langchain_community.chat_models import ChatOllama
+from langchain_ollama import OllamaEmbeddings
+from langchain_ollama import ChatOllama
 from langchain_core.prompts import PromptTemplate
 from langchain_core.output_parsers import StrOutputParser
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 import chromadb
 
 # Initialize the embedding model
-embed_model = OllamaEmbeddings(model="mistral")
+embed_model = OllamaEmbeddings(model="nomic-embed-text")
 
 # Path to the markdown file
-file_path = "./data/product.md"
+file_path = "./testdata/product.md"
 
 # Function to read the content of the file
 def load_file_content(file_path):
@@ -74,6 +74,6 @@ def answer_question(question):
     return generation
 
 # Example usage
-question = "What does the product look liked?"
-answer = answer_question(question)
-print(f"Answer: {answer}")
+
+print(f"Answer: {answer_question('What does the product look like?')}")
+#print(f"Answer: {answer_question('What is the combat like?')}")