```python
# non-streaming
from llama_index.llms.openai import OpenAI

resp = OpenAI().complete("Paul Graham is ")
print(resp)
```

```python
# using streaming endpoint
from llama_index.llms.openai import OpenAI

llm = OpenAI()
resp = llm.stream_complete("Paul Graham is ")
for delta in resp:
    print(delta.delta, end="")
```
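The LLM interface also exposes async counterparts (`acomplete`, `astream_complete`); a minimal sketch of the non-streaming async call, assuming it is run through `asyncio`:

```python
import asyncio

from llama_index.llms.openai import OpenAI


async def main() -> None:
    llm = OpenAI()
    # async, non-streaming counterpart of complete()
    resp = await llm.acomplete("Paul Graham is ")
    print(resp)


asyncio.run(main())
```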
1.1.2 Chat Example
```python
from llama_index.core.llms import ChatMessage
from llama_index.llms.openai import OpenAI

messages = [
    ChatMessage(
        role="system", content="You are a pirate with a colorful personality"
    ),
    ChatMessage(role="user", content="What is your name"),
]
resp = OpenAI().chat(messages)
print(resp)
```
Output

```
assistant: Ahoy matey! The name's Captain Rainbowbeard! Aye, I be a pirate with a love for all things colorful and bright, from me beard to me ship's sails. What can I do for ye today, me hearty?
```
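Chat has a streaming variant as well; `stream_chat` yields deltas just like `stream_complete`, reusing the `messages` list defined above:

```python
llm = OpenAI()
resp = llm.stream_chat(messages)
for r in resp:
    print(r.delta, end="")
```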
```python
import torch
from transformers import BitsAndBytesConfig
from llama_index.core.prompts import PromptTemplate
from llama_index.llms.huggingface import HuggingFaceLLM

# quantize to save memory
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
)

# instantiate the model; the model name below is illustrative --
# any Hugging Face causal LM that fits your hardware will do
llm = HuggingFaceLLM(
    model_name="HuggingFaceH4/zephyr-7b-beta",
    tokenizer_name="HuggingFaceH4/zephyr-7b-beta",
    model_kwargs={"quantization_config": quantization_config},
    device_map="auto",
)

response = llm.complete("What is the meaning of life?")
print(str(response))
```
Output

```
This is a question that has been asked for centuries, and there is no one definitive answer. However, there are many different perspectives and philosophies that offer insights into this question.

One perspective is that the meaning of life is to find happiness and fulfillment. This can be achieved through various means, such as pursuing one's passions, cultivating meaningful relationships, and contributing to society in a positive way.

Another perspective is that the meaning of life is to serve a higher purpose or to fulfill a divine plan. This can involve following a particular religious or spiritual path, or simply living a life that is in alignment with one's values and beliefs.

A third perspective is that the meaning of life is to learn and grow, both as individuals and as a society. This can involve seeking out knowledge and understanding, as well as working to improve the world around us.

Ultimately, the meaning of life is a deeply personal and subjective question. Each individual must find their own answers and live their lives in accordance with their own values and beliefs.
```
```python
from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# global default embedding model
Settings.embed_model = OpenAIEmbedding()

# per-index: a local model also works; the model name here is illustrative
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)
```
```python
import os

from llama_index.core import SimpleDirectoryReader
from llama_index.core.multi_modal_llms.generic_utils import load_image_urls
from llama_index.multi_modal_llms.openai import OpenAIMultiModal

OPENAI_API_TOKEN = os.getenv("OPENAI_API_KEY")  # or set your key directly

# load image documents from URLs (replace with your own URLs)
image_urls = ["https://example.com/image.png"]
image_documents = load_image_urls(image_urls)

# ...or load image documents from a local directory
local_directory = "./images"
image_documents = SimpleDirectoryReader(local_directory).load_data()

# non-streaming
openai_mm_llm = OpenAIMultiModal(
    model="gpt-4-vision-preview",
    api_key=OPENAI_API_TOKEN,
    max_new_tokens=300,
)
response = openai_mm_llm.complete(
    prompt="what is in the image?",
    image_documents=image_documents,
)
```
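The multimodal interface mirrors the text LLM interface, so a streaming variant is also available; a minimal sketch using `stream_complete`, reusing `openai_mm_llm` and `image_documents` from above:

```python
# streaming: print deltas as they arrive
response_stream = openai_mm_llm.stream_complete(
    prompt="what is in the image?",
    image_documents=image_documents,
)
for r in response_stream:
    print(r.delta, end="")
```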
```python
import qdrant_client
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core.indices import MultiModalVectorStoreIndex
from llama_index.vector_stores.qdrant import QdrantVectorStore

# Create a local Qdrant vector store
client = qdrant_client.QdrantClient(path="qdrant_mm_db")

# if you only need image_store for image retrieval,
# you can remove text_store
text_store = QdrantVectorStore(
    client=client, collection_name="text_collection"
)
image_store = QdrantVectorStore(
    client=client, collection_name="image_collection"
)
storage_context = StorageContext.from_defaults(
    vector_store=text_store, image_store=image_store
)

# Load text and image documents from a local folder
documents = SimpleDirectoryReader("./data_folder/").load_data()

# Create the MultiModal index
index = MultiModalVectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context,
)
```
```python
from llama_index.core import PromptTemplate
from llama_index.core.query_engine import SimpleMultiModalQueryEngine
from llama_index.multi_modal_llms.openai import OpenAIMultiModal

# build a retriever over the multimodal index created above
# (the top-k values are illustrative)
retriever_engine = index.as_retriever(
    similarity_top_k=3, image_similarity_top_k=3
)

# retrieve more information from the GPT4V response
retrieval_results = retriever_engine.retrieve(response)

# if you only need image retrieval without text retrieval,
# you can use `text_to_image_retrieve`
# retrieval_results = retriever_engine.text_to_image_retrieve(response)

qa_tmpl_str = (
    "Context information is below.\n"
    "---------------------\n"
    "{context_str}\n"
    "---------------------\n"
    "Given the context information and not prior knowledge, "
    "answer the query.\n"
    "Query: {query_str}\n"
    "Answer: "
)
qa_tmpl = PromptTemplate(qa_tmpl_str)
```
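The template can then be wired into a multimodal query engine over the index built earlier. A minimal sketch, assuming the `index` and `openai_mm_llm` from above (depending on your LlamaIndex version, the keyword may be `multi_modal_llm` or `llm`; the query string is illustrative):

```python
query_engine = index.as_query_engine(
    multi_modal_llm=openai_mm_llm, text_qa_template=qa_tmpl
)

response = query_engine.query("what is in the image?")
print(str(response))
```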