openai_bot_retriever.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from typing import Dict

from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS

from .base import BaseRetriever


class OpenAIBotRetriever(BaseRetriever):
    """OpenAI Bot Retriever."""

    entities = [
        "openai",
    ]

    def __init__(self, config: Dict) -> None:
        """
        Initializes the OpenAIBotRetriever instance with the provided configuration.

        Args:
            config (Dict): A dictionary containing configuration settings.
                - model_name (str): The name of the embedding model to use.
                - api_type (str): The type of API to use (currently only 'openai'
                  is supported).
                - api_key (str): The API key for the 'openai' API.
                - base_url (str): The base URL for the 'openai' API.
                - tiktoken_enabled (bool, optional): Whether the embedding client
                  should use tiktoken for token counting. Defaults to False.

        Raises:
            ValueError: If api_type is not 'openai', or if api_key or base_url
                is None when api_type is 'openai'.
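
        Example (a minimal sketch; the model name, key, and endpoint below are
        illustrative placeholders, not values shipped with this class):
            retriever = OpenAIBotRetriever({
                "model_name": "text-embedding-3-small",
                "api_type": "openai",
                "api_key": "sk-...",
                "base_url": "https://api.openai.com/v1",
            })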
  39. """
        super().__init__()
        model_name = config.get("model_name", None)
        api_type = config.get("api_type", None)
        api_key = config.get("api_key", None)
        base_url = config.get("base_url", None)
        tiktoken_enabled = config.get("tiktoken_enabled", False)

        if api_type not in ["openai"]:
            raise ValueError("api_type must be one of ['openai']")

        if api_type == "openai" and api_key is None:
            raise ValueError("api_key cannot be empty when api_type is openai.")

        if base_url is None:
            raise ValueError("base_url cannot be empty when api_type is openai.")

        try:
            from langchain_openai import OpenAIEmbeddings
        except ImportError as e:
            raise ImportError(
                "langchain-openai is not installed, please install it first."
            ) from e

        self.embedding = OpenAIEmbeddings(
            model=model_name,
            api_key=api_key,
            base_url=base_url,
            tiktoken_enabled=tiktoken_enabled,
        )

        self.model_name = model_name
        self.config = config

    def generate_vector_database(
        self,
        text_list: list[str],
        block_size: int = 300,
        separators: list[str] = ["\t", "\n", "。", "\n\n", ""],
        sleep_time: float = 0.5,
    ) -> FAISS:
  74. """
  75. Generates a vector database from a list of texts.
  76. Args:
  77. text_list (list[str]): A list of texts to generate the vector database from.
  78. block_size (int): The size of each chunk to split the text into.
  79. separators (list[str]): A list of separators to use when splitting the text.
  80. sleep_time (float): The time to sleep between embedding generations to avoid rate limiting.
  81. Returns:
  82. FAISS: The generated vector database.
  83. Raises:
  84. ValueError: If an unsupported API type is configured.
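
        Example (illustrative, reusing the retriever from __init__'s example):
            vector_db = retriever.generate_vector_database(
                ["PaddleX is a low-code development tool.", "It ships OCR pipelines."]
            )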
  85. """
        # Split the concatenated input into overlapping chunks and wrap each
        # chunk in a Document before embedding.
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=block_size, chunk_overlap=20, separators=separators
        )
        texts = text_splitter.split_text("\t".join(text_list))
        all_splits = [Document(page_content=text) for text in texts]
        vectorstore = FAISS.from_documents(
            documents=all_splits, embedding=self.embedding
        )
        return vectorstore

    def encode_vector_store_to_bytes(self, vectorstore: FAISS) -> str:
        """
        Serialize the vector store to bytes and encode the result as a string.

        Args:
            vectorstore (FAISS): The vector store to be serialized and encoded.

        Returns:
            str: The encoded vector store.
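
        Example (illustrative round trip; encode_vector_store and
        decode_vector_store are assumed to be inherited from BaseRetriever):
            encoded = retriever.encode_vector_store_to_bytes(vector_db)
            restored = retriever.decode_vector_store_from_bytes(encoded)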
  103. """
        encoded = self.encode_vector_store(vectorstore.serialize_to_bytes())
        return encoded

    def decode_vector_store_from_bytes(self, vectorstore: str) -> FAISS:
        """
        Decode and deserialize a vector store from its encoded string form.

        Args:
            vectorstore (str): The encoded, serialized vector store string.

        Returns:
            FAISS: Deserialized vector store object.

        Raises:
            ValueError: If the retrieved vector store is not for PaddleX.
  116. """
        if not self.is_vector_store(vectorstore):
            raise ValueError("The retrieved vectorstore is not for PaddleX.")
        vector = FAISS.deserialize_from_bytes(
            self.decode_vector_store(vectorstore), self.embedding
        )
        return vector

    def similarity_retrieval(
        self, query_text_list: list[str], vectorstore: FAISS, sleep_time: float = 0.5
    ) -> str:
  126. """
  127. Retrieve similar contexts based on a list of query texts.
  128. Args:
  129. query_text_list (list[str]): A list of query texts to search for similar contexts.
  130. vectorstore (FAISS): The vector store where to perform the similarity search.
  131. sleep_time (float): The time to sleep between each query, in seconds. Default is 0.5.
  132. Returns:
  133. str: A concatenated string of all unique contexts found.
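
        Example (illustrative):
            context = retriever.similarity_retrieval(
                ["What pipelines does PaddleX ship?"], vector_db
            )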
  134. """
        contexts = []
        for query_text in query_text_list:
            # Pause between queries to stay under the embedding API's rate limit.
            time.sleep(sleep_time)
            docs = vectorstore.similarity_search_with_relevance_scores(
                query_text, k=2
            )
            # Order the retrieved chunks from most to least relevant.
            ranked = sorted(docs, key=lambda pair: pair[1], reverse=True)
            contexts.extend(document.page_content for document, _score in ranked)
        # Deduplicate while preserving retrieval order (a plain set() would make
        # the result ordering nondeterministic).
        contexts = list(dict.fromkeys(contexts))
        return " ".join(contexts)