from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
chat = ChatOpenAI(temperature=0.0)
template_string = "Translate the text that is delimited by <<< >>> into a style that is {style}. text: <<<{text}>>>"
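# Minimal usage sketch for the translation template above; customer_style and
# customer_email are hypothetical example values, not from the original notes:
prompt_template = ChatPromptTemplate.from_template(template_string)
customer_style = "American English in a calm and respectful tone"
customer_email = "Arrr, I be fuming that me blender lid flew off!"
customer_messages = prompt_template.format_messages(style=customer_style, text=customer_email)
customer_response = chat(customer_messages)
print(customer_response.content)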
review_template_2 = """\
For the following text, extract the following information:
{format_instructions}
text: {text}
"""
"
gift_schema = ResponseSchema(
    name="gift",
    description="Was the item purchased as a gift for someone else? "
                "Answer True if yes, False if not or unknown."
)
delivery_days_schema = ResponseSchema(
    name="delivery_days",
    description="How many days did it take for the product to arrive? "
                "If this information is not found, output -1."
)
price_value_schema = ResponseSchema(
    name="price_value",
    description="Extract any sentences about the value or price, "
                "and output them as a comma separated Python list."
)
response_schemas = [gift_schema,
                    delivery_days_schema,
                    price_value_schema]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
format_instructions = output_parser.get_format_instructions()
prompt = ChatPromptTemplate.from_template(template=review_template_2)
messages = prompt.format_messages(text=customer_review, format_instructions=format_instructions)
response = chat(messages)
output_dict = output_parser.parse(response.content)
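# The parser should return a plain dict keyed by the schema names defined above;
# the lookups below are illustrative:
print(type(output_dict))               # <class 'dict'>
print(output_dict.get("gift"))
print(output_dict.get("delivery_days"))
print(output_dict.get("price_value"))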
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory, ConversationBufferWindowMemory, ConversationTokenBufferMemory, ConversationSummaryBufferMemory

ConversationBufferMemory()                                      # stores the full conversation history
ConversationBufferWindowMemory(k=1)                             # keeps only the last k exchanges
ConversationTokenBufferMemory(llm=llm, max_token_limit=30)      # keeps recent history up to a token limit
ConversationSummaryBufferMemory(llm=llm, max_token_limit=100)   # summarizes older turns once the limit is exceeded
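# Sketch of how the window memory behaves, assuming two saved exchanges
# (the inputs/outputs are made-up examples):
window_memory = ConversationBufferWindowMemory(k=1)
window_memory.save_context({"input": "Hi"}, {"output": "Hello, how can I help?"})
window_memory.save_context({"input": "Not much"}, {"output": "Cool"})
# With k=1 only the most recent exchange is kept:
print(window_memory.load_memory_variables({}))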
llm = ChatOpenAI(temperature=0.0, model=llm_model)
memory = ConversationBufferMemory()
conversation = ConversationChain(
    llm=llm,
    memory=memory,
    verbose=True
)
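# Example turns against the conversation chain; the prompts are illustrative only:
conversation.predict(input="Hi, my name is Andrew")
conversation.predict(input="What is 1+1?")
conversation.predict(input="What is my name?")   # answered from the buffer memory
print(memory.buffer)                             # full stored history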
https://eightify.app/summary/computer-science-and-technology/free-google-palm-api-usage-step-by-step-guide
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.indexes import VectorstoreIndexCreator
from langchain.document_loaders import UnstructuredPDFLoader
from langchain.chains import RetrievalQA
from langchain.embeddings import GooglePalmEmbeddings
from langchain.llms import GooglePalm
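# `loaders` is not defined in the original notes; a minimal assumption is a list
# wrapping a local PDF with the imported UnstructuredPDFLoader (the path is hypothetical):
loaders = [UnstructuredPDFLoader("product_reviews.pdf")]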
index = VectorstoreIndexCreator(
    embedding=GooglePalmEmbeddings(),
    text_splitter=RecursiveCharacterTextSplitter(chunk_size=800, chunk_overlap=0)
).from_loaders(loaders)
llm = GooglePalm(temperature=0.1)
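# Quick sanity check of the index without an explicit chain; the question is illustrative:
print(index.query("What does the document say about delivery times?", llm=llm))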
chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=index.vectorstore.as_retriever(),
    return_source_documents=True
)
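# Calling the chain; because return_source_documents=True, the result is a dict
# with "result" and "source_documents" keys (the query string is illustrative):
query = "Summarize the main complaints in the reviews."
response = chain({"query": query})
print(response["result"])
print(len(response["source_documents"]))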