```python
from typing import Optional

from langchain_openai import ChatOpenAI

from config import llm_config

# Global LLM instance shared across the data-development agent
global_llm: Optional[ChatOpenAI] = None

# Initialize the model
def init_llm() -> ChatOpenAI:
    # Read the model settings from the config file
    c = llm_config
    global global_llm
    if global_llm is None:
        # Create the LLM instance from the config
        global_llm = ChatOpenAI(
            model=c["model"],
            base_url=c["base_url"],
            api_key=c["api_key"],
            temperature=c["temperature"],
            streaming=True,
        )
    return global_llm

# Get the shared LLM instance, initializing it lazily on first use
def get_llm() -> ChatOpenAI:
    if global_llm is None:
        init_llm()
        # Alternatively: raise ValueError("Call init_llm() first to initialize the model")
    return global_llm

# Get the coder model (a fresh instance on every call)
def get_llm_coder() -> ChatOpenAI:
    c = llm_config
    return ChatOpenAI(
        model=c["coder_model"],
        base_url=c["base_url"],
        api_key=c["api_key"],
        temperature=c["temperature"],
        streaming=True,
    )

if __name__ == '__main__':
    llm = get_llm()
    print(f"llm: {llm}")
    # llm = get_llm_coder()
    # response = llm.invoke("你好")
    # print(response)
    question = "你好"
    response = llm.invoke(question)
    print(response)
```
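
The module imports `llm_config` from a local `config` module that is not shown here. A minimal sketch of what it might contain, assuming a plain dict: the key names are taken from the lookups above, while every value is a placeholder.

```python
# config.py — hypothetical sketch; only the key names come from the code above
llm_config = {
    "model": "gpt-4o-mini",                    # default chat model (placeholder)
    "coder_model": "gpt-4o",                   # model used by get_llm_coder() (placeholder)
    "base_url": "https://api.openai.com/v1",   # or any OpenAI-compatible endpoint
    "api_key": "sk-...",                       # in practice, read from an environment variable
    "temperature": 0.0,
}
```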
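
Both factory functions set `streaming=True`, yet the `__main__` block calls the blocking `invoke()`. To actually consume tokens as they arrive, LangChain chat models expose a `stream()` method; a short usage sketch, assuming the file above is saved as an importable module (the module name `llm` is an assumption):

```python
from llm import get_llm  # hypothetical module name for the file above

llm = get_llm()
# stream() yields message chunks as tokens arrive from the API
for chunk in llm.stream("你好"):
    print(chunk.content, end="", flush=True)
print()
```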