# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import time
from typing import Dict

from erniebot_agent.extensions.langchain.embeddings import ErnieEmbeddings
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import QianfanEmbeddingsEndpoint
from langchain_community.vectorstores import FAISS

from .base import BaseRetriever


class ErnieBotRetriever(BaseRetriever):
    """Ernie Bot Retriever"""

    entities = [
        "aistudio",
        "qianfan",
    ]

    MODELS = [
        "ernie-4.0",
        "ernie-3.5",
        "ernie-3.5-8k",
        "ernie-lite",
        "ernie-tiny-8k",
        "ernie-speed",
        "ernie-speed-128k",
        "ernie-char-8k",
    ]

    def __init__(self, config: Dict) -> None:
        """
        Initializes the ErnieBotRetriever instance with the provided configuration.

        Args:
            config (Dict): A dictionary containing configuration settings.
                - model_name (str): The name of the model to use.
                - api_type (str): The type of API to use ('aistudio' or 'qianfan').
                - ak (str, optional): The access key for the 'qianfan' API.
                - sk (str, optional): The secret key for the 'qianfan' API.
                - access_token (str, optional): The access token for the 'aistudio' API.

        Raises:
            ValueError: If model_name is not in self.MODELS,
                api_type is not 'aistudio' or 'qianfan',
                access_token is missing for the 'aistudio' API,
                or ak and sk are missing for the 'qianfan' API.
        """
        super().__init__()
        model_name = config.get("model_name", None)
        api_type = config.get("api_type", None)
        ak = config.get("ak", None)
        sk = config.get("sk", None)
        access_token = config.get("access_token", None)
        if model_name not in self.MODELS:
            raise ValueError(f"model_name must be in {self.MODELS} of ErnieBotChat.")
        if api_type not in ["aistudio", "qianfan"]:
            raise ValueError("api_type must be one of ['aistudio', 'qianfan']")
        if api_type == "aistudio" and access_token is None:
            raise ValueError("access_token cannot be empty when api_type is aistudio.")
        if api_type == "qianfan" and (ak is None or sk is None):
            raise ValueError("ak and sk cannot be empty when api_type is qianfan.")
        self.model_name = model_name
        self.config = config
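
    # A minimal construction sketch; the values below are placeholders, not
    # real model choices or credentials:
    #
    #     retriever = ErnieBotRetriever(
    #         {
    #             "model_name": "ernie-3.5",
    #             "api_type": "aistudio",
    #             "access_token": "<your-aistudio-access-token>",
    #         }
    #     )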

    def generate_vector_database(
        self,
        text_list: list[str],
        block_size: int = 300,
        separators: list[str] = ["\t", "\n", "。", "\n\n", ""],
        sleep_time: float = 0.5,
    ) -> FAISS:
        """
        Generates a vector database from a list of texts, using embeddings
        selected by the configured API type.

        Args:
            text_list (list[str]): A list of texts to generate the vector database from.
            block_size (int): The size of each chunk to split the text into.
            separators (list[str]): A list of separators to use when splitting the text.
            sleep_time (float): The time to sleep between embedding requests to avoid rate limiting.

        Returns:
            FAISS: The generated vector database.

        Raises:
            ValueError: If an unsupported API type is configured.
        """
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=block_size, chunk_overlap=20, separators=separators
        )
        texts = text_splitter.split_text("\t".join(text_list))
        all_splits = [Document(page_content=text) for text in texts]
        api_type = self.config["api_type"]
        if api_type == "qianfan":
            # QianfanEmbeddingsEndpoint reads its credentials from the
            # QIANFAN_AK/QIANFAN_SK environment variables.
            os.environ["QIANFAN_AK"] = os.environ.get("EB_AK", self.config["ak"])
            os.environ["QIANFAN_SK"] = os.environ.get("EB_SK", self.config["sk"])
            vectorstore = FAISS.from_documents(
                documents=all_splits, embedding=QianfanEmbeddingsEndpoint()
            )
        elif api_type == "aistudio":
            token = self.config["access_token"]
            # ErnieEmbeddings.chunk_size is 16, so seed the store with the first
            # document, then embed the rest in slices of at most 16 and merge them in.
            vectorstore = FAISS.from_documents(
                documents=all_splits[0:1],
                embedding=ErnieEmbeddings(aistudio_access_token=token),
            )
            # max(1, ...) keeps the slice step positive when there is only one split.
            step = min(16, max(1, len(all_splits) - 1))
            for split_batch in [
                all_splits[i : i + step] for i in range(1, len(all_splits), step)
            ]:
                time.sleep(sleep_time)
                vectorstore_slice = FAISS.from_documents(
                    documents=split_batch,
                    embedding=ErnieEmbeddings(aistudio_access_token=token),
                )
                vectorstore.merge_from(vectorstore_slice)
        else:
            raise ValueError(f"Unsupported api_type: {api_type}")
        return vectorstore
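
    # Usage sketch (assumes a configured `retriever`; the input texts are
    # illustrative):
    #
    #     vs = retriever.generate_vector_database(
    #         ["First source text ...", "Second source text ..."]
    #     )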

    def encode_vector_store_to_bytes(self, vectorstore: FAISS) -> str:
        """
        Serialize a vector store to bytes and encode it as a string.

        Args:
            vectorstore (FAISS): The vector store to be serialized and encoded.

        Returns:
            str: The encoded vector store.
        """
        return self.encode_vector_store(vectorstore.serialize_to_bytes())

    def decode_vector_store_from_bytes(self, vectorstore: str) -> FAISS:
        """
        Decode a vector store from an encoded string according to the configured API type.

        Args:
            vectorstore (str): The serialized and encoded vector store string.

        Returns:
            FAISS: The deserialized vector store object.

        Raises:
            ValueError: If the retrieved vector store is not for PaddleX
                or if an unsupported API type is configured.
        """
        if not self.is_vector_store(vectorstore):
            raise ValueError("The retrieved vectorstore is not for PaddleX.")
        api_type = self.config["api_type"]
        if api_type == "aistudio":
            access_token = self.config["access_token"]
            embeddings = ErnieEmbeddings(aistudio_access_token=access_token)
        elif api_type == "qianfan":
            ak = self.config["ak"]
            sk = self.config["sk"]
            embeddings = QianfanEmbeddingsEndpoint(qianfan_ak=ak, qianfan_sk=sk)
        else:
            raise ValueError(f"Unsupported api_type: {api_type}")
        return FAISS.deserialize_from_bytes(
            self.decode_vector_store(vectorstore), embeddings
        )
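
    # Round-trip sketch (assumes `retriever` and a FAISS store `vs` built by
    # generate_vector_database):
    #
    #     encoded = retriever.encode_vector_store_to_bytes(vs)  # str, persistable
    #     restored = retriever.decode_vector_store_from_bytes(encoded)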

    def similarity_retrieval(
        self,
        query_text_list: list[str],
        vectorstore: FAISS,
        sleep_time: float = 0.5,
        topk: int = 2,
        min_characters: int = 3500,
    ) -> str:
        """
        Retrieve similar contexts for a list of query texts.

        Args:
            query_text_list (list[str]): A list of query texts to search for similar contexts.
            vectorstore (FAISS): The vector store in which to perform the similarity search.
            sleep_time (float): The time to sleep between queries, in seconds. Default is 0.5.
            topk (int): The number of results to retrieve per query. Default is 2.
            min_characters (int): The minimum number of characters required for text processing.
                Defaults to 3500. Not used by the current implementation.

        Returns:
            str: A space-separated string of all unique contexts found.
        """
        contexts = []
        for query_text in query_text_list:
            time.sleep(sleep_time)
            docs = vectorstore.similarity_search_with_relevance_scores(query_text, k=topk)
            # Collect the retrieved contexts ordered by relevance score, highest first.
            scored = sorted(
                [(document.page_content, score) for document, score in docs],
                key=lambda x: x[1],
                reverse=True,
            )
            contexts.extend([text for text, _ in scored])
        # Deduplicate; note that set() does not preserve the relevance ordering.
        contexts = list(set(contexts))
        return " ".join(contexts)
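
# End-to-end usage sketch (placeholder credentials; results depend on the
# remote embedding service):
#
#     retriever = ErnieBotRetriever(
#         {
#             "model_name": "ernie-3.5",
#             "api_type": "qianfan",
#             "ak": "<your-qianfan-ak>",
#             "sk": "<your-qianfan-sk>",
#         }
#     )
#     vs = retriever.generate_vector_database(["Some long source text ..."])
#     context = retriever.similarity_retrieval(["A question about the text"], vs)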