```python
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE
```
Given a raw text input to a language model select the model prompt best suited for the input. You will be given the names of the available prompts and a description of what the prompt is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response from the language model.
<< FORMATTING >>
Return a markdown code snippet with a JSON object formatted to look like:

```json
{
    "destination": string \ name of the prompt to use or "DEFAULT"
    "next_inputs": string \ a potentially modified version of the original input
}
```

REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR it can be "DEFAULT" if the input is not well suited for any of the candidate prompts.
REMEMBER: "next_inputs" can just be the original input if you don't think any modifications are needed.
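To wire this template into an actual router, the usual pattern is to format the candidate prompt names and descriptions into the template, wrap the result in a `PromptTemplate` whose output parser is `RouterOutputParser`, and build an `LLMRouterChain` from it. The sketch below assumes a `prompt_infos` list of name/description dicts and an `llm` defined elsewhere:

```python
from langchain_core.prompts import PromptTemplate

# prompt_infos is assumed: a list of {"name": ..., "description": ...} dicts
destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos]
destinations_str = "\n".join(destinations)

# Fill the candidate prompts into the router template above
router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(destinations=destinations_str)

router_prompt = PromptTemplate(
    template=router_template,
    input_variables=["input"],
    output_parser=RouterOutputParser(),  # parses the JSON into destination / next_inputs
)

router_chain = LLMRouterChain.from_llm(llm, router_prompt)
```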
```python
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
```
```python
response_schemas = [
    ResponseSchema(name="content", description="The original content"),
    ResponseSchema(name="summary", description="The summary of the content"),
]
```
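With the two schemas defined, the next step is typically to build the parser, inject its format instructions into the prompt, and parse the model's reply back into a dict. A minimal sketch (the summarization prompt wording and sample text here are illustrative):

```python
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
format_instructions = output_parser.get_format_instructions()

prompt = PromptTemplate(
    template="Summarize the text below.\n{format_instructions}\nText: {text}",
    input_variables=["text"],
    partial_variables={"format_instructions": format_instructions},
)

llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
output = llm.invoke(prompt.format(text="LangChain is a framework for building LLM applications..."))

# Returns a dict shaped like {"content": "...", "summary": "..."}
result = output_parser.parse(output.content)
print(result)
```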
```python
client.chat.completions.create(
    model='gpt-3.5-turbo',
    messages=[
        {'role': 'system', 'content': 'You are a helpful assistant.'},
        {'role': 'user', 'content': 'What is the capital of France?'},
        {'role': 'assistant', 'content': 'The capital of France is Paris.'}
    ]
)
```
```python
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model='gpt-3.5-turbo',
    temperature=0.5,
    max_tokens=200
)
res = llm.invoke(prompt.format(flower_type="野玫瑰", occasion="爱情"))
print(res)
```
Selecting the most similar example

We can use vector search to select the example most similar to the input, so that the model generates the ad copy based on that example.
```python
from langchain.prompts.example_selector import SemanticSimilarityExampleSelector
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
```
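Building on these imports, one way to put the selector to work is to pair it with a `FewShotPromptTemplate`: the selector embeds the candidate examples into Chroma and, at format time, retrieves the one closest to the input. The few-shot examples and prompt wording below are illustrative placeholders:

```python
from langchain.prompts import FewShotPromptTemplate, PromptTemplate

# Illustrative flower -> ad copy examples
examples = [
    {"flower": "玫瑰", "ad": "爱意绽放,一束玫瑰送给最重要的人。"},
    {"flower": "向日葵", "ad": "向阳而生,把温暖和活力带回家。"},
]

example_selector = SemanticSimilarityExampleSelector.from_examples(
    examples,
    OpenAIEmbeddings(),   # embeds each example for vector search
    Chroma,               # vector store used to index the examples
    k=1,                  # return only the single most similar example
)

example_prompt = PromptTemplate(
    input_variables=["flower", "ad"],
    template="Flower: {flower}\nAd copy: {ad}",
)

few_shot_prompt = FewShotPromptTemplate(
    example_selector=example_selector,
    example_prompt=example_prompt,
    prefix="Write ad copy for the given flower, following the style of the example.",
    suffix="Flower: {flower}\nAd copy:",
    input_variables=["flower"],
)

# The selector picks whichever stored example is closest to "野玫瑰"
print(few_shot_prompt.format(flower="野玫瑰"))
```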
tool = """ 1 tool: python_interpreter, description: use it to execute python code 2 tool: web_access, description: use it to get realtime info, input is the question or query """
```python
react_prompt = f"""
Try your best to answer user's question, and use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should use one of tools in the given tool list: [{tool}]
Action Input: the input to the action
Here, you should pause the process and return to wait the outside observation.
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
"""
```
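The key instruction in this prompt is to pause before the Observation: the driver code stops the model there, runs the requested tool itself, appends the real observation, and lets the model continue. A minimal sketch of what such a `react_demo` loop could look like, assuming the OpenAI client and a hypothetical `run_tool` dispatcher over the two tools above:

```python
from openai import OpenAI

client = OpenAI()

def react_demo(question: str, max_steps: int = 5) -> None:
    history = react_prompt + f"\nQuestion: {question}\nThought:"
    for _ in range(max_steps):
        resp = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": history}],
            stop=["Observation:"],  # pause before the model invents an observation
        )
        step = resp.choices[0].message.content
        print(step)
        history += step
        if "Final Answer:" in step:
            break
        # Parse the requested tool and its input (deliberately simplistic parsing)
        action = step.split("Action:")[-1].split("\n")[0].strip()
        action_input = step.split("Action Input:")[-1].split("\n")[0].strip()
        observation = run_tool(action, action_input)  # hypothetical dispatcher over python_interpreter / web_access
        print(f"Observation: {observation}")
        history += f"\nObservation: {observation}\nThought:"
```

Asking it a question about the capital of France produces a trace along these lines: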
Thought: We can use web access to find the answer to this question.
Action: web_access
Action Input: "capital of France"
Observation: The capital of France is Paris.
Thought: I now know the final answer.
Final Answer: The capital of France is Paris.
As we can see, the LLM returned the correct answer as expected.
Another example:
```python
react_demo("广州今天适合穿什么?")
```
Output:
Question: What should I wear in Guangzhou today?
Thought: We need to check the current weather in Guangzhou to determine what would be suitable to wear.
Action: web_access
Action Input: current weather in Guangzhou
Observation: The current weather in Guangzhou is 28°C with scattered thunderstorms.
Thought: Based on the weather information, it would be best to wear light and breathable clothing along with an umbrella in case of rain.
Final Answer: It is recommended to wear light and breathable clothing with an umbrella in Guangzhou today due to the scattered thunderstorms and 28°C temperature.
```python
from langchain_openai import ChatOpenAI
from langchain.agents import Tool

llm = ChatOpenAI(
    model_name="gpt-4",
    temperature=0.7,
    max_tokens=1000,
)
```
```python
tools = [
    Tool(
        name="Get current info",
        func=query_web,
        description="""only invoke it when you need to answer question about realtime info.
And the input should be a search query.""",
    ),
    Tool(
        name="query spotmax info",
        func=qa,
        description="""only invoke it when you need to get the info about spotmax/maxgroup/maxarch/maxchaos.
And the input should be the question.""",
    ),
    Tool(
        name="create an image",
        func=create_image,
        description="""invoke it when you need to create an image.
And the input should be the description of the image.""",
    ),
]

from langchain.memory import ConversationBufferWindowMemory
from langchain.agents import ZeroShotAgent, AgentExecutor
from langchain.chains.llm import LLMChain
```
prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:""" suffix = """Begin!" {chat_history} Question: {input} {agent_scratchpad}"""
The AgentExecutor's processing flow is as follows (Thought -> Action -> Observation -> Thought -> Final Answer):
> Entering new AgentExecutor chain...
Thought: The question is asking for the current weather in Guangzhou and a male outfit recommendation. I can use the 'Get current info' tool to find the weather, and the 'create an image' tool to generate the outfit image.
Action: Get current info
Action Input: Guangzhou weather today
Observation: 94°F
Thought: The weather in Guangzhou is quite hot today. Now I need to think of an outfit that would be suitable for such warm weather.
Action: create an image
Action Input: A light summer outfit for men suitable for 94°F weather
Observation:
Thought: I now have the final answer.
Final Answer: 广州今天的天气很热,达到了94°F。我为你创建了一张适合这种天气的男士夏季轻便穿搭图。请参考图片中的服装搭配。
> Finished chain.
We can see that when I asked this question, it did the following:

1. It thought, realized it needed today's weather in Guangzhou, which the LLM itself doesn't know, and so used the Get current info tool.
2. After getting the weather information, it thought again, realized it needed to generate an image, and since we have a create an image tool, it used that tool to generate the image.
3. Finally, it returned today's weather in Guangzhou together with an image.
Of course, we can also ask it questions about the local knowledge base, for example "What is spotmax?" (ask questions based on your own PDF; this is just an example).