llm.py

from langchain_openai import ChatOpenAI
from config import llm_config
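
# NOTE: `config.llm_config` is not shown in this file; judging by the keys
# read below, it is assumed to be a plain dict along these lines (all
# values here are hypothetical placeholders):
#
#   llm_config = {
#       "model": "gpt-4o-mini",
#       "coder_model": "gpt-4o",
#       "base_url": "https://api.openai.com/v1",
#       "api_key": "sk-...",
#       "temperature": 0,
#   }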

# Global LLM instance for the data-development pipeline
global_llm: ChatOpenAI | None = None


# Initialize the model
def init_llm() -> ChatOpenAI:
    # Read the LLM settings from the configuration
    c = llm_config
    global global_llm
    if global_llm is None:
        # Create the LLM instance from the config
        global_llm = ChatOpenAI(
            model=c["model"],
            base_url=c["base_url"],
            api_key=c["api_key"],
            temperature=c["temperature"],
            streaming=True,
        )
    return global_llm

# Get the shared LLM, initializing it lazily on first use
def get_llm() -> ChatOpenAI:
    if global_llm is None:
        init_llm()
        # raise ValueError("Call init_llm() first to initialize the LLM")
    return global_llm

# Get a coder LLM (a fresh instance on every call, never cached)
def get_llm_coder() -> ChatOpenAI:
    c = llm_config
    return ChatOpenAI(
        model=c["coder_model"],
        base_url=c["base_url"],
        api_key=c["api_key"],
        temperature=c["temperature"],
        streaming=True,
    )

if __name__ == '__main__':
    llm = get_llm()
    print("llm:", llm)
    # llm = get_llm_coder()
    # response = llm.invoke("Hello")
    # print(response)
    question = "Hello"
    response = llm.invoke(question)
    print(response)
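    # Because the model is built with streaming=True, tokens can also be
    # consumed incrementally via the standard LangChain Runnable API
    # (a sketch; .stream() yields AIMessageChunk objects):
    #
    #   for chunk in llm.stream(question):
    #       print(chunk.content, end="", flush=True)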