async_client.py 70 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906
  1. """The Async LangSmith Client."""
  2. from __future__ import annotations
  3. import asyncio
  4. import contextlib
  5. import datetime
  6. import json
  7. import uuid
  8. import warnings
  9. from collections.abc import AsyncGenerator, AsyncIterator, Mapping, Sequence
  10. from typing import (
  11. Any,
  12. Literal,
  13. Optional,
  14. Union,
  15. cast,
  16. )
  17. import httpx
  18. from langsmith import client as ls_client
  19. from langsmith import schemas as ls_schemas
  20. from langsmith import utils as ls_utils
  21. from langsmith._internal import _beta_decorator as ls_beta
  22. ID_TYPE = Union[uuid.UUID, str]
  23. class AsyncClient:
  24. """Async Client for interacting with the LangSmith API."""
  25. __slots__ = ("_retry_config", "_client", "_web_url", "_settings")
  26. def __init__(
  27. self,
  28. api_url: Optional[str] = None,
  29. api_key: Optional[str] = None,
  30. timeout_ms: Optional[
  31. Union[
  32. int, tuple[Optional[int], Optional[int], Optional[int], Optional[int]]
  33. ]
  34. ] = None,
  35. retry_config: Optional[Mapping[str, Any]] = None,
  36. web_url: Optional[str] = None,
  37. ):
  38. """Initialize the async client."""
  39. self._retry_config = retry_config or {"max_retries": 3}
  40. _headers = {
  41. "Content-Type": "application/json",
  42. }
  43. api_key = ls_utils.get_api_key(api_key)
  44. api_url = ls_utils.get_api_url(api_url)
  45. if api_key:
  46. _headers[ls_client.X_API_KEY] = api_key
  47. ls_client._validate_api_key_if_hosted(api_url, api_key)
  48. if isinstance(timeout_ms, int):
  49. timeout_: Union[tuple, float] = (timeout_ms / 1000, None, None, None)
  50. elif isinstance(timeout_ms, tuple):
  51. timeout_ = tuple([t / 1000 if t is not None else None for t in timeout_ms])
  52. else:
  53. timeout_ = 10
  54. self._client = httpx.AsyncClient(
  55. base_url=api_url, headers=_headers, timeout=timeout_
  56. )
  57. self._web_url = web_url
  58. self._settings: Optional[ls_schemas.LangSmithSettings] = None
  59. async def __aenter__(self) -> AsyncClient:
  60. """Enter the async client."""
  61. return self
  62. async def __aexit__(self, exc_type, exc_val, exc_tb):
  63. """Exit the async client."""
  64. await self.aclose()
  65. async def aclose(self):
  66. """Close the async client."""
  67. await self._client.aclose()
    @property
    def _api_url(self) -> str:
        """The API base URL configured on the underlying HTTP client."""
        return str(self._client.base_url)
    @property
    def _host_url(self) -> str:
        """The web host url."""
        # Derived from the explicit web_url when given, otherwise from the
        # API URL via ls_utils.
        return ls_utils.get_host_url(self._web_url, self._api_url)
  75. async def _arequest_with_retries(
  76. self,
  77. method: str,
  78. endpoint: str,
  79. **kwargs: Any,
  80. ) -> httpx.Response:
  81. """Make an async HTTP request with retries."""
  82. max_retries = cast(int, self._retry_config.get("max_retries", 3))
  83. # Python requests library used by the normal Client filters out params with None values
  84. # The httpx library does not. Filter them out here to keep behavior consistent
  85. if "params" in kwargs:
  86. params = kwargs["params"]
  87. filtered_params = {k: v for k, v in params.items() if v is not None}
  88. kwargs["params"] = filtered_params
  89. for attempt in range(max_retries):
  90. try:
  91. response = await self._client.request(method, endpoint, **kwargs)
  92. ls_utils.raise_for_status_with_text(response)
  93. return response
  94. except httpx.HTTPStatusError as e:
  95. if response.status_code == 500:
  96. raise ls_utils.LangSmithAPIError(
  97. f"Server error caused failure to {method}"
  98. f" {endpoint} in"
  99. f" LangSmith API. {repr(e)}"
  100. )
  101. elif response.status_code == 408:
  102. raise ls_utils.LangSmithRequestTimeout(
  103. f"Client took too long to send request to {method}{endpoint}"
  104. )
  105. elif response.status_code == 429:
  106. raise ls_utils.LangSmithRateLimitError(
  107. f"Rate limit exceeded for {endpoint}. {repr(e)}"
  108. )
  109. elif response.status_code == 401:
  110. raise ls_utils.LangSmithAuthError(
  111. f"Authentication failed for {endpoint}. {repr(e)}"
  112. )
  113. elif response.status_code == 404:
  114. raise ls_utils.LangSmithNotFoundError(
  115. f"Resource not found for {endpoint}. {repr(e)}"
  116. )
  117. elif response.status_code == 409:
  118. raise ls_utils.LangSmithConflictError(
  119. f"Conflict for {endpoint}. {repr(e)}"
  120. )
  121. else:
  122. raise ls_utils.LangSmithError(
  123. f"Failed to {method} {endpoint} in LangSmith API. {repr(e)}"
  124. )
  125. except httpx.RequestError as e:
  126. if attempt == max_retries - 1:
  127. raise ls_utils.LangSmithConnectionError(f"Request error: {repr(e)}")
  128. await asyncio.sleep(2**attempt)
  129. raise ls_utils.LangSmithAPIError(
  130. "Unexpected error connecting to the LangSmith API"
  131. )
  132. async def _aget_paginated_list(
  133. self,
  134. path: str,
  135. params: Optional[dict[str, Any]] = None,
  136. ) -> AsyncIterator[dict[str, Any]]:
  137. """Get a paginated list of items."""
  138. params = params or {}
  139. offset = params.get("offset", 0)
  140. params["limit"] = params.get("limit", 100)
  141. while True:
  142. params["offset"] = offset
  143. response = await self._arequest_with_retries("GET", path, params=params)
  144. items = response.json()
  145. if not items:
  146. break
  147. for item in items:
  148. yield item
  149. if len(items) < params["limit"]:
  150. break
  151. offset += len(items)
  152. async def _aget_cursor_paginated_list(
  153. self,
  154. path: str,
  155. *,
  156. body: Optional[dict] = None,
  157. request_method: str = "POST",
  158. data_key: str = "runs",
  159. ) -> AsyncIterator[dict]:
  160. """Get a cursor paginated list of items."""
  161. params_ = body.copy() if body else {}
  162. while True:
  163. response = await self._arequest_with_retries(
  164. request_method,
  165. path,
  166. content=ls_client._dumps_json(params_),
  167. )
  168. response_body = response.json()
  169. if not response_body:
  170. break
  171. if not response_body.get(data_key):
  172. break
  173. for run in response_body[data_key]:
  174. yield run
  175. cursors = response_body.get("cursors")
  176. if not cursors:
  177. break
  178. if not cursors.get("next"):
  179. break
  180. params_["cursor"] = cursors["next"]
  181. async def create_run(
  182. self,
  183. name: str,
  184. inputs: dict[str, Any],
  185. run_type: str,
  186. *,
  187. project_name: Optional[str] = None,
  188. revision_id: Optional[ls_client.ID_TYPE] = None,
  189. **kwargs: Any,
  190. ) -> None:
  191. """Create a run."""
  192. run_create = {
  193. "name": name,
  194. "id": kwargs.get("id") or uuid.uuid4(),
  195. "inputs": inputs,
  196. "run_type": run_type,
  197. "session_name": project_name or ls_utils.get_tracer_project(),
  198. "revision_id": revision_id,
  199. **kwargs,
  200. }
  201. await self._arequest_with_retries(
  202. "POST", "/runs", content=ls_client._dumps_json(run_create)
  203. )
  204. async def update_run(
  205. self,
  206. run_id: ls_client.ID_TYPE,
  207. **kwargs: Any,
  208. ) -> None:
  209. """Update a run."""
  210. data = {**kwargs, "id": ls_client._as_uuid(run_id)}
  211. await self._arequest_with_retries(
  212. "PATCH",
  213. f"/runs/{ls_client._as_uuid(run_id)}",
  214. content=ls_client._dumps_json(data),
  215. )
  216. async def read_run(self, run_id: ls_client.ID_TYPE) -> ls_schemas.Run:
  217. """Read a run."""
  218. response = await self._arequest_with_retries(
  219. "GET",
  220. f"/runs/{ls_client._as_uuid(run_id)}",
  221. )
  222. return ls_schemas.Run(**response.json())
  223. async def list_runs(
  224. self,
  225. *,
  226. project_id: Optional[
  227. Union[ls_client.ID_TYPE, Sequence[ls_client.ID_TYPE]]
  228. ] = None,
  229. project_name: Optional[Union[str, Sequence[str]]] = None,
  230. run_type: Optional[str] = None,
  231. trace_id: Optional[ls_client.ID_TYPE] = None,
  232. reference_example_id: Optional[ls_client.ID_TYPE] = None,
  233. query: Optional[str] = None,
  234. filter: Optional[str] = None,
  235. trace_filter: Optional[str] = None,
  236. tree_filter: Optional[str] = None,
  237. is_root: Optional[bool] = None,
  238. parent_run_id: Optional[ls_client.ID_TYPE] = None,
  239. start_time: Optional[datetime.datetime] = None,
  240. error: Optional[bool] = None,
  241. run_ids: Optional[Sequence[ls_client.ID_TYPE]] = None,
  242. select: Optional[Sequence[str]] = None,
  243. limit: Optional[int] = None,
  244. **kwargs: Any,
  245. ) -> AsyncIterator[ls_schemas.Run]:
  246. """List runs from the LangSmith API.
  247. Parameters
  248. ----------
  249. project_id : UUID or None, default=None
  250. The ID(s) of the project to filter by.
  251. project_name : str or None, default=None
  252. The name(s) of the project to filter by.
  253. run_type : str or None, default=None
  254. The type of the runs to filter by.
  255. trace_id : UUID or None, default=None
  256. The ID of the trace to filter by.
  257. reference_example_id : UUID or None, default=None
  258. The ID of the reference example to filter by.
  259. query : str or None, default=None
  260. The query string to filter by.
  261. filter : str or None, default=None
  262. The filter string to filter by.
  263. trace_filter : str or None, default=None
  264. Filter to apply to the ROOT run in the trace tree. This is meant to
  265. be used in conjunction with the regular `filter` parameter to let you
  266. filter runs by attributes of the root run within a trace.
  267. tree_filter : str or None, default=None
  268. Filter to apply to OTHER runs in the trace tree, including
  269. sibling and child runs. This is meant to be used in conjunction with
  270. the regular `filter` parameter to let you filter runs by attributes
  271. of any run within a trace.
  272. is_root : bool or None, default=None
  273. Whether to filter by root runs.
  274. parent_run_id : UUID or None, default=None
  275. The ID of the parent run to filter by.
  276. start_time : datetime or None, default=None
  277. The start time to filter by.
  278. error : bool or None, default=None
  279. Whether to filter by error status.
  280. run_ids : List[str or UUID] or None, default=None
  281. The IDs of the runs to filter by.
  282. limit : int or None, default=None
  283. The maximum number of runs to return.
  284. **kwargs : Any
  285. Additional keyword arguments.
  286. Yields:
  287. ------
  288. Run
  289. The runs.
  290. Examples:
  291. --------
  292. List all runs in a project:
  293. ```python
  294. project_runs = client.list_runs(project_name="<your_project>")
  295. ```
  296. List LLM and Chat runs in the last 24 hours:
  297. ```python
  298. todays_llm_runs = client.list_runs(
  299. project_name="<your_project>",
  300. start_time=datetime.now() - timedelta(days=1),
  301. run_type="llm",
  302. )
  303. ```
  304. List root traces in a project:
  305. ```python
  306. root_runs = client.list_runs(project_name="<your_project>", is_root=1)
  307. ```
  308. List runs without errors:
  309. ```python
  310. correct_runs = client.list_runs(project_name="<your_project>", error=False)
  311. ```
  312. List runs and only return their inputs/outputs (to speed up the query):
  313. ```python
  314. input_output_runs = client.list_runs(
  315. project_name="<your_project>", select=["inputs", "outputs"]
  316. )
  317. ```
  318. List runs by run ID:
  319. ```python
  320. run_ids = [
  321. "a36092d2-4ad5-4fb4-9c0d-0dba9a2ed836",
  322. "9398e6be-964f-4aa4-8ae9-ad78cd4b7074",
  323. ]
  324. selected_runs = client.list_runs(id=run_ids)
  325. ```
  326. List all "chain" type runs that took more than 10 seconds and had
  327. `total_tokens` greater than 5000:
  328. ```python
  329. chain_runs = client.list_runs(
  330. project_name="<your_project>",
  331. filter='and(eq(run_type, "chain"), gt(latency, 10), gt(total_tokens, 5000))',
  332. )
  333. ```
  334. List all runs called "extractor" whose root of the trace was assigned feedback "user_score" score of 1:
  335. ```python
  336. good_extractor_runs = client.list_runs(
  337. project_name="<your_project>",
  338. filter='eq(name, "extractor")',
  339. trace_filter='and(eq(feedback_key, "user_score"), eq(feedback_score, 1))',
  340. )
  341. ```
  342. List all runs that started after a specific timestamp and either have "error" not equal to null or a "Correctness" feedback score equal to 0:
  343. ```python
  344. complex_runs = client.list_runs(
  345. project_name="<your_project>",
  346. filter='and(gt(start_time, "2023-07-15T12:34:56Z"), or(neq(error, null), and(eq(feedback_key, "Correctness"), eq(feedback_score, 0.0))))',
  347. )
  348. ```
  349. List all runs where `tags` include "experimental" or "beta" and `latency` is greater than 2 seconds:
  350. ```python
  351. tagged_runs = client.list_runs(
  352. project_name="<your_project>",
  353. filter='and(or(has(tags, "experimental"), has(tags, "beta")), gt(latency, 2))',
  354. )
  355. ```
  356. """
  357. project_ids = []
  358. if isinstance(project_id, (uuid.UUID, str)):
  359. project_ids.append(project_id)
  360. elif isinstance(project_id, list):
  361. project_ids.extend(project_id)
  362. if project_name is not None:
  363. if isinstance(project_name, str):
  364. project_name = [project_name]
  365. projects = await asyncio.gather(
  366. *[self.read_project(project_name=name) for name in project_name]
  367. )
  368. project_ids.extend([project.id for project in projects])
  369. if select and "child_run_ids" in select:
  370. warnings.warn(
  371. "The child_run_ids field is deprecated and will be removed in following versions",
  372. DeprecationWarning,
  373. )
  374. body_query: dict[str, Any] = {
  375. "session": project_ids if project_ids else None,
  376. "run_type": run_type,
  377. "reference_example": (
  378. [reference_example_id] if reference_example_id else None
  379. ),
  380. "query": query,
  381. "filter": filter,
  382. "trace_filter": trace_filter,
  383. "tree_filter": tree_filter,
  384. "is_root": is_root,
  385. "parent_run": parent_run_id,
  386. "start_time": start_time.isoformat() if start_time else None,
  387. "error": error,
  388. "id": run_ids,
  389. "trace": trace_id,
  390. "select": select,
  391. "limit": limit,
  392. **kwargs,
  393. }
  394. if project_ids:
  395. body_query["session"] = [
  396. str(ls_client._as_uuid(id_)) for id_ in project_ids
  397. ]
  398. body = {k: v for k, v in body_query.items() if v is not None}
  399. ix = 0
  400. async for run in self._aget_cursor_paginated_list("/runs/query", body=body):
  401. yield ls_schemas.Run(**run)
  402. ix += 1
  403. if limit is not None and ix >= limit:
  404. break
  405. async def share_run(
  406. self, run_id: ls_client.ID_TYPE, *, share_id: Optional[ls_client.ID_TYPE] = None
  407. ) -> str:
  408. """Get a share link for a run asynchronously.
  409. Args:
  410. run_id (ID_TYPE): The ID of the run to share.
  411. share_id (Optional[ID_TYPE], optional): Custom share ID.
  412. If not provided, a random UUID will be generated.
  413. Returns:
  414. str: The URL of the shared run.
  415. Raises:
  416. httpx.HTTPStatusError: If the API request fails.
  417. """
  418. run_id_ = ls_client._as_uuid(run_id, "run_id")
  419. data = {
  420. "run_id": str(run_id_),
  421. "share_token": str(share_id or uuid.uuid4()),
  422. }
  423. response = await self._arequest_with_retries(
  424. "PUT",
  425. f"/runs/{run_id_}/share",
  426. content=ls_client._dumps_json(data),
  427. )
  428. ls_utils.raise_for_status_with_text(response)
  429. share_token = response.json()["share_token"]
  430. return f"{self._host_url}/public/{share_token}/r"
  431. async def run_is_shared(self, run_id: ls_client.ID_TYPE) -> bool:
  432. """Get share state for a run asynchronously."""
  433. link = await self.read_run_shared_link(ls_client._as_uuid(run_id, "run_id"))
  434. return link is not None
  435. async def read_run_shared_link(self, run_id: ls_client.ID_TYPE) -> Optional[str]:
  436. """Retrieve the shared link for a specific run asynchronously.
  437. Args:
  438. run_id (ID_TYPE): The ID of the run.
  439. Returns:
  440. Optional[str]: The shared link for the run, or None if the link is not
  441. available.
  442. Raises:
  443. httpx.HTTPStatusError: If the API request fails.
  444. """
  445. response = await self._arequest_with_retries(
  446. "GET",
  447. f"/runs/{ls_client._as_uuid(run_id, 'run_id')}/share",
  448. )
  449. ls_utils.raise_for_status_with_text(response)
  450. result = response.json()
  451. if result is None or "share_token" not in result:
  452. return None
  453. return f"{self._host_url}/public/{result['share_token']}/r"
  454. async def create_project(
  455. self,
  456. project_name: str,
  457. **kwargs: Any,
  458. ) -> ls_schemas.TracerSession:
  459. """Create a project."""
  460. data = {"name": project_name, **kwargs}
  461. response = await self._arequest_with_retries(
  462. "POST", "/sessions", content=ls_client._dumps_json(data)
  463. )
  464. return ls_schemas.TracerSession(**response.json())
  465. async def read_project(
  466. self,
  467. project_name: Optional[str] = None,
  468. project_id: Optional[ls_client.ID_TYPE] = None,
  469. ) -> ls_schemas.TracerSession:
  470. """Read a project."""
  471. if project_id:
  472. response = await self._arequest_with_retries(
  473. "GET", f"/sessions/{ls_client._as_uuid(project_id)}"
  474. )
  475. elif project_name:
  476. response = await self._arequest_with_retries(
  477. "GET", "/sessions", params={"name": project_name}
  478. )
  479. else:
  480. raise ValueError("Either project_name or project_id must be provided")
  481. data = response.json()
  482. if isinstance(data, list):
  483. if not data:
  484. raise ls_utils.LangSmithNotFoundError(
  485. f"Project {project_name} not found"
  486. )
  487. return ls_schemas.TracerSession(**data[0])
  488. return ls_schemas.TracerSession(**data)
  489. async def delete_project(
  490. self, *, project_name: Optional[str] = None, project_id: Optional[str] = None
  491. ) -> None:
  492. """Delete a project from LangSmith.
  493. Parameters
  494. ----------
  495. project_name : str or None, default=None
  496. The name of the project to delete.
  497. project_id : str or None, default=None
  498. The ID of the project to delete.
  499. """
  500. if project_id is None and project_name is None:
  501. raise ValueError("Either project_name or project_id must be provided")
  502. if project_id is None:
  503. project = await self.read_project(project_name=project_name)
  504. project_id = str(project.id)
  505. if not project_id:
  506. raise ValueError("Project not found")
  507. await self._arequest_with_retries(
  508. "DELETE",
  509. f"/sessions/{ls_client._as_uuid(project_id)}",
  510. )
  511. async def create_dataset(
  512. self,
  513. dataset_name: str,
  514. **kwargs: Any,
  515. ) -> ls_schemas.Dataset:
  516. """Create a dataset."""
  517. data = {"name": dataset_name, **kwargs}
  518. response = await self._arequest_with_retries(
  519. "POST", "/datasets", content=ls_client._dumps_json(data)
  520. )
  521. return ls_schemas.Dataset(**response.json())
  522. async def read_dataset(
  523. self,
  524. dataset_name: Optional[str] = None,
  525. dataset_id: Optional[ls_client.ID_TYPE] = None,
  526. ) -> ls_schemas.Dataset:
  527. """Read a dataset."""
  528. if dataset_id:
  529. response = await self._arequest_with_retries(
  530. "GET", f"/datasets/{ls_client._as_uuid(dataset_id)}"
  531. )
  532. elif dataset_name:
  533. response = await self._arequest_with_retries(
  534. "GET", "/datasets", params={"name": dataset_name}
  535. )
  536. else:
  537. raise ValueError("Either dataset_name or dataset_id must be provided")
  538. data = response.json()
  539. if isinstance(data, list):
  540. if not data:
  541. raise ls_utils.LangSmithNotFoundError(
  542. f"Dataset {dataset_name} not found"
  543. )
  544. return ls_schemas.Dataset(**data[0])
  545. return ls_schemas.Dataset(**data)
  546. async def delete_dataset(self, dataset_id: ls_client.ID_TYPE) -> None:
  547. """Delete a dataset."""
  548. await self._arequest_with_retries(
  549. "DELETE",
  550. f"/datasets/{ls_client._as_uuid(dataset_id)}",
  551. )
  552. async def list_datasets(
  553. self,
  554. **kwargs: Any,
  555. ) -> AsyncIterator[ls_schemas.Dataset]:
  556. """List datasets."""
  557. async for dataset in self._aget_paginated_list("/datasets", params=kwargs):
  558. yield ls_schemas.Dataset(**dataset)
  559. async def create_example(
  560. self,
  561. inputs: dict[str, Any],
  562. outputs: Optional[dict[str, Any]] = None,
  563. dataset_id: Optional[ls_client.ID_TYPE] = None,
  564. dataset_name: Optional[str] = None,
  565. **kwargs: Any,
  566. ) -> ls_schemas.Example:
  567. """Create an example."""
  568. if dataset_id is None and dataset_name is None:
  569. raise ValueError("Either dataset_id or dataset_name must be provided")
  570. if dataset_id is None:
  571. dataset = await self.read_dataset(dataset_name=dataset_name)
  572. dataset_id = dataset.id
  573. data = {
  574. "inputs": inputs,
  575. "outputs": outputs,
  576. "dataset_id": str(dataset_id),
  577. **kwargs,
  578. }
  579. response = await self._arequest_with_retries(
  580. "POST", "/examples", content=ls_client._dumps_json(data)
  581. )
  582. return ls_schemas.Example(**response.json())
  583. async def read_example(self, example_id: ls_client.ID_TYPE) -> ls_schemas.Example:
  584. """Read an example."""
  585. response = await self._arequest_with_retries(
  586. "GET", f"/examples/{ls_client._as_uuid(example_id)}"
  587. )
  588. return ls_schemas.Example(**response.json())
  589. async def list_examples(
  590. self,
  591. *,
  592. dataset_id: Optional[ls_client.ID_TYPE] = None,
  593. dataset_name: Optional[str] = None,
  594. **kwargs: Any,
  595. ) -> AsyncIterator[ls_schemas.Example]:
  596. """List examples."""
  597. params = kwargs.copy()
  598. if dataset_id:
  599. params["dataset"] = ls_client._as_uuid(dataset_id)
  600. elif dataset_name:
  601. dataset = await self.read_dataset(dataset_name=dataset_name)
  602. params["dataset"] = dataset.id
  603. async for example in self._aget_paginated_list("/examples", params=params):
  604. yield ls_schemas.Example(**example)
  605. async def create_feedback(
  606. self,
  607. run_id: Optional[ls_client.ID_TYPE],
  608. key: str,
  609. score: Optional[float] = None,
  610. value: Optional[Any] = None,
  611. comment: Optional[str] = None,
  612. **kwargs: Any,
  613. ) -> ls_schemas.Feedback:
  614. """Create feedback for a run.
  615. Args:
  616. run_id (Optional[ls_client.ID_TYPE]): The ID of the run to provide feedback for.
  617. Can be None for project-level feedback.
  618. key (str): The name of the metric or aspect this feedback is about.
  619. score (Optional[float]): The score to rate this run on the metric or aspect.
  620. value (Optional[Any]): The display value or non-numeric value for this feedback.
  621. comment (Optional[str]): A comment about this feedback.
  622. **kwargs: Additional keyword arguments to include in the feedback data.
  623. Returns:
  624. ls_schemas.Feedback: The created feedback object.
  625. Raises:
  626. httpx.HTTPStatusError: If the API request fails.
  627. """ # noqa: E501
  628. data = {
  629. "run_id": ls_client._ensure_uuid(run_id, accept_null=True),
  630. "key": key,
  631. "score": score,
  632. "value": value,
  633. "comment": comment,
  634. **kwargs,
  635. }
  636. response = await self._arequest_with_retries(
  637. "POST", "/feedback", content=ls_client._dumps_json(data)
  638. )
  639. return ls_schemas.Feedback(**response.json())
  640. async def create_feedback_from_token(
  641. self,
  642. token_or_url: Union[str, uuid.UUID],
  643. score: Union[float, int, bool, None] = None,
  644. *,
  645. value: Union[float, int, bool, str, dict, None] = None,
  646. correction: Union[dict, None] = None,
  647. comment: Union[str, None] = None,
  648. metadata: Optional[dict] = None,
  649. ) -> None:
  650. """Create feedback from a presigned token or URL.
  651. Args:
  652. token_or_url (Union[str, uuid.UUID]): The token or URL from which to create
  653. feedback.
  654. score (Union[float, int, bool, None], optional): The score of the feedback.
  655. Defaults to None.
  656. value (Union[float, int, bool, str, dict, None], optional): The value of the
  657. feedback. Defaults to None.
  658. correction (Union[dict, None], optional): The correction of the feedback.
  659. Defaults to None.
  660. comment (Union[str, None], optional): The comment of the feedback. Defaults
  661. to None.
  662. metadata (Optional[dict], optional): Additional metadata for the feedback.
  663. Defaults to None.
  664. Raises:
  665. ValueError: If the source API URL is invalid.
  666. Returns:
  667. None: This method does not return anything.
  668. """
  669. source_api_url, token_uuid = ls_client._parse_token_or_url(
  670. token_or_url, self._api_url, num_parts=1
  671. )
  672. if source_api_url != self._api_url:
  673. raise ValueError(f"Invalid source API URL. {source_api_url}")
  674. response = await self._arequest_with_retries(
  675. "POST",
  676. f"/feedback/tokens/{ls_client._as_uuid(token_uuid)}",
  677. content=ls_client._dumps_json(
  678. {
  679. "score": score,
  680. "value": value,
  681. "correction": correction,
  682. "comment": comment,
  683. "metadata": metadata,
  684. # TODO: Add ID once the API supports it.
  685. }
  686. ),
  687. )
  688. ls_utils.raise_for_status_with_text(response)
  689. async def create_presigned_feedback_token(
  690. self,
  691. run_id: ls_client.ID_TYPE,
  692. feedback_key: str,
  693. *,
  694. expiration: Optional[datetime.datetime | datetime.timedelta] = None,
  695. feedback_config: Optional[ls_schemas.FeedbackConfig] = None,
  696. feedback_id: Optional[ls_client.ID_TYPE] = None,
  697. ) -> ls_schemas.FeedbackIngestToken:
  698. """Create a pre-signed URL to send feedback data to.
  699. This is useful for giving browser-based clients a way to upload
  700. feedback data directly to LangSmith without accessing the
  701. API key.
  702. Args:
  703. run_id:
  704. feedback_key:
  705. expiration: The expiration time of the pre-signed URL.
  706. Either a datetime or a timedelta offset from now.
  707. Default to 3 hours.
  708. feedback_config: FeedbackConfig or None.
  709. If creating a feedback_key for the first time,
  710. this defines how the metric should be interpreted,
  711. such as a continuous score (w/ optional bounds),
  712. or distribution over categorical values.
  713. feedback_id: The ID of the feedback to create. If not provided, a new
  714. feedback will be created.
  715. Returns:
  716. The pre-signed URL for uploading feedback data.
  717. """
  718. body: dict[str, Any] = {
  719. "run_id": run_id,
  720. "feedback_key": feedback_key,
  721. "feedback_config": feedback_config,
  722. "id": feedback_id or str(uuid.uuid4()),
  723. }
  724. if expiration is None:
  725. body["expires_in"] = ls_schemas.TimeDeltaInput(
  726. days=0,
  727. hours=3,
  728. minutes=0,
  729. )
  730. elif isinstance(expiration, datetime.datetime):
  731. body["expires_at"] = expiration.isoformat()
  732. elif isinstance(expiration, datetime.timedelta):
  733. body["expires_in"] = ls_schemas.TimeDeltaInput(
  734. days=expiration.days,
  735. hours=expiration.seconds // 3600,
  736. minutes=(expiration.seconds % 3600) // 60,
  737. )
  738. else:
  739. raise ValueError(
  740. f"Invalid expiration type: {type(expiration)}. "
  741. "Expected datetime.datetime or datetime.timedelta."
  742. )
  743. response = await self._arequest_with_retries(
  744. "POST",
  745. "/feedback/tokens",
  746. content=ls_client._dumps_json(body),
  747. )
  748. return ls_schemas.FeedbackIngestToken(**response.json())
  749. async def read_feedback(
  750. self, feedback_id: ls_client.ID_TYPE
  751. ) -> ls_schemas.Feedback:
  752. """Read feedback."""
  753. response = await self._arequest_with_retries(
  754. "GET", f"/feedback/{ls_client._as_uuid(feedback_id)}"
  755. )
  756. return ls_schemas.Feedback(**response.json())
  757. async def list_feedback(
  758. self,
  759. *,
  760. run_ids: Optional[Sequence[ls_client.ID_TYPE]] = None,
  761. feedback_key: Optional[Sequence[str]] = None,
  762. feedback_source_type: Optional[Sequence[ls_schemas.FeedbackSourceType]] = None,
  763. limit: Optional[int] = None,
  764. **kwargs: Any,
  765. ) -> AsyncIterator[ls_schemas.Feedback]:
  766. """List feedback."""
  767. params = {
  768. "run": (
  769. [str(ls_client._as_uuid(id_)) for id_ in run_ids] if run_ids else None
  770. ),
  771. "limit": min(limit, 100) if limit is not None else 100,
  772. **kwargs,
  773. }
  774. if feedback_key is not None:
  775. params["key"] = feedback_key
  776. if feedback_source_type is not None:
  777. params["source"] = feedback_source_type
  778. ix = 0
  779. async for feedback in self._aget_paginated_list("/feedback", params=params):
  780. yield ls_schemas.Feedback(**feedback)
  781. ix += 1
  782. if limit is not None and ix >= limit:
  783. break
  784. async def delete_feedback(self, feedback_id: ID_TYPE) -> None:
  785. """Delete a feedback by ID.
  786. Args:
  787. feedback_id (Union[UUID, str]):
  788. The ID of the feedback to delete.
  789. Returns:
  790. None
  791. """
  792. response = await self._arequest_with_retries(
  793. "DELETE", f"/feedback/{ls_client._as_uuid(feedback_id, 'feedback_id')}"
  794. )
  795. ls_utils.raise_for_status_with_text(response)
  796. # Annotation Queue API
  797. async def list_annotation_queues(
  798. self,
  799. *,
  800. queue_ids: Optional[list[ID_TYPE]] = None,
  801. name: Optional[str] = None,
  802. name_contains: Optional[str] = None,
  803. limit: Optional[int] = None,
  804. ) -> AsyncIterator[ls_schemas.AnnotationQueue]:
  805. """List the annotation queues on the LangSmith API.
  806. Args:
  807. queue_ids (Optional[List[Union[UUID, str]]]):
  808. The IDs of the queues to filter by.
  809. name (Optional[str]):
  810. The name of the queue to filter by.
  811. name_contains (Optional[str]):
  812. The substring that the queue name should contain.
  813. limit (Optional[int]):
  814. The maximum number of queues to return.
  815. Yields:
  816. The annotation queues.
  817. """
  818. params: dict = {
  819. "ids": (
  820. [
  821. ls_client._as_uuid(id_, f"queue_ids[{i}]")
  822. for i, id_ in enumerate(queue_ids)
  823. ]
  824. if queue_ids is not None
  825. else None
  826. ),
  827. "name": name,
  828. "name_contains": name_contains,
  829. "limit": min(limit, 100) if limit is not None else 100,
  830. }
  831. ix = 0
  832. async for feedback in self._aget_paginated_list(
  833. "/annotation-queues", params=params
  834. ):
  835. yield ls_schemas.AnnotationQueue(**feedback)
  836. ix += 1
  837. if limit is not None and ix >= limit:
  838. break
  839. async def create_annotation_queue(
  840. self,
  841. *,
  842. name: str,
  843. description: Optional[str] = None,
  844. queue_id: Optional[ID_TYPE] = None,
  845. ) -> ls_schemas.AnnotationQueue:
  846. """Create an annotation queue on the LangSmith API.
  847. Args:
  848. name (str):
  849. The name of the annotation queue.
  850. description (Optional[str]):
  851. The description of the annotation queue.
  852. queue_id (Optional[Union[UUID, str]]):
  853. The ID of the annotation queue.
  854. Returns:
  855. AnnotationQueue: The created annotation queue object.
  856. """
  857. body = {
  858. "name": name,
  859. "description": description,
  860. "id": str(queue_id) if queue_id is not None else str(uuid.uuid4()),
  861. }
  862. response = await self._arequest_with_retries(
  863. "POST",
  864. "/annotation-queues",
  865. json={k: v for k, v in body.items() if v is not None},
  866. )
  867. ls_utils.raise_for_status_with_text(response)
  868. return ls_schemas.AnnotationQueue(
  869. **response.json(),
  870. )
  871. async def read_annotation_queue(
  872. self, queue_id: ID_TYPE
  873. ) -> ls_schemas.AnnotationQueue:
  874. """Read an annotation queue with the specified `queue_id`.
  875. Args:
  876. queue_id (Union[UUID, str]): The ID of the annotation queue to read.
  877. Returns:
  878. AnnotationQueue: The annotation queue object.
  879. """
  880. # TODO: Replace when actual endpoint is added
  881. return await self.list_annotation_queues(queue_ids=[queue_id]).__anext__()
  882. async def update_annotation_queue(
  883. self, queue_id: ID_TYPE, *, name: str, description: Optional[str] = None
  884. ) -> None:
  885. """Update an annotation queue with the specified `queue_id`.
  886. Args:
  887. queue_id (Union[UUID, str]): The ID of the annotation queue to update.
  888. name (str): The new name for the annotation queue.
  889. description (Optional[str]): The new description for the
  890. annotation queue. Defaults to None.
  891. Returns:
  892. None
  893. """
  894. response = await self._arequest_with_retries(
  895. "PATCH",
  896. f"/annotation-queues/{ls_client._as_uuid(queue_id, 'queue_id')}",
  897. json={
  898. "name": name,
  899. "description": description,
  900. },
  901. )
  902. ls_utils.raise_for_status_with_text(response)
  903. async def delete_annotation_queue(self, queue_id: ID_TYPE) -> None:
  904. """Delete an annotation queue with the specified `queue_id`.
  905. Args:
  906. queue_id (Union[UUID, str]): The ID of the annotation queue to delete.
  907. Returns:
  908. None
  909. """
  910. response = await self._arequest_with_retries(
  911. "DELETE",
  912. f"/annotation-queues/{ls_client._as_uuid(queue_id, 'queue_id')}",
  913. headers={"Accept": "application/json", **self._client.headers},
  914. )
  915. ls_utils.raise_for_status_with_text(response)
  916. async def add_runs_to_annotation_queue(
  917. self, queue_id: ID_TYPE, *, run_ids: list[ID_TYPE]
  918. ) -> None:
  919. """Add runs to an annotation queue with the specified `queue_id`.
  920. Args:
  921. queue_id (Union[UUID, str]): The ID of the annotation queue.
  922. run_ids (List[Union[UUID, str]]): The IDs of the runs to be added to the annotation
  923. queue.
  924. Returns:
  925. None
  926. """
  927. response = await self._arequest_with_retries(
  928. "POST",
  929. f"/annotation-queues/{ls_client._as_uuid(queue_id, 'queue_id')}/runs",
  930. json=[
  931. str(ls_client._as_uuid(id_, f"run_ids[{i}]"))
  932. for i, id_ in enumerate(run_ids)
  933. ],
  934. )
  935. ls_utils.raise_for_status_with_text(response)
  936. async def delete_run_from_annotation_queue(
  937. self, queue_id: ID_TYPE, *, run_id: ID_TYPE
  938. ) -> None:
  939. """Delete a run from an annotation queue with the specified `queue_id` and `run_id`.
  940. Args:
  941. queue_id (Union[UUID, str]): The ID of the annotation queue.
  942. run_id (Union[UUID, str]): The ID of the run to be added to the annotation
  943. queue.
  944. Returns:
  945. None
  946. """
  947. response = await self._arequest_with_retries(
  948. "DELETE",
  949. f"/annotation-queues/{ls_client._as_uuid(queue_id, 'queue_id')}/runs/{ls_client._as_uuid(run_id, 'run_id')}",
  950. )
  951. ls_utils.raise_for_status_with_text(response)
  952. async def get_run_from_annotation_queue(
  953. self, queue_id: ID_TYPE, *, index: int
  954. ) -> ls_schemas.RunWithAnnotationQueueInfo:
  955. """Get a run from an annotation queue at the specified index.
  956. Args:
  957. queue_id (Union[UUID, str]): The ID of the annotation queue.
  958. index (int): The index of the run to retrieve.
  959. Returns:
  960. RunWithAnnotationQueueInfo: The run at the specified index.
  961. Raises:
  962. LangSmithNotFoundError: If the run is not found at the given index.
  963. LangSmithError: For other API-related errors.
  964. """
  965. base_url = f"/annotation-queues/{ls_client._as_uuid(queue_id, 'queue_id')}/run"
  966. response = await self._arequest_with_retries("GET", f"{base_url}/{index}")
  967. ls_utils.raise_for_status_with_text(response)
  968. return ls_schemas.RunWithAnnotationQueueInfo(**response.json())
  969. @ls_beta.warn_beta
  970. async def index_dataset(
  971. self,
  972. *,
  973. dataset_id: ls_client.ID_TYPE,
  974. tag: str = "latest",
  975. **kwargs: Any,
  976. ) -> None:
  977. """Enable dataset indexing. Examples are indexed by their inputs.
  978. This enables searching for similar examples by inputs with
  979. ``client.similar_examples()``.
  980. Args:
  981. dataset_id (UUID): The ID of the dataset to index.
  982. tag (str, optional): The version of the dataset to index. If 'latest'
  983. then any updates to the dataset (additions, updates, deletions of
  984. examples) will be reflected in the index.
  985. Returns:
  986. None
  987. Raises:
  988. requests.HTTPError: If the request fails.
  989. """ # noqa: E501
  990. dataset_id = ls_client._as_uuid(dataset_id, "dataset_id")
  991. resp = await self._arequest_with_retries(
  992. "POST",
  993. f"/datasets/{dataset_id}/index",
  994. content=ls_client._dumps_json({"tag": tag, **kwargs}),
  995. )
  996. ls_utils.raise_for_status_with_text(resp)
  997. @ls_beta.warn_beta
  998. async def sync_indexed_dataset(
  999. self,
  1000. *,
  1001. dataset_id: ls_client.ID_TYPE,
  1002. **kwargs: Any,
  1003. ) -> None:
  1004. """Sync dataset index.
  1005. This already happens automatically every 5 minutes, but you can call this to
  1006. force a sync.
  1007. Args:
  1008. dataset_id (UUID): The ID of the dataset to sync.
  1009. Returns:
  1010. None
  1011. Raises:
  1012. requests.HTTPError: If the request fails.
  1013. """ # noqa: E501
  1014. dataset_id = ls_client._as_uuid(dataset_id, "dataset_id")
  1015. resp = await self._arequest_with_retries(
  1016. "POST",
  1017. f"/datasets/{dataset_id}/index/sync",
  1018. content=ls_client._dumps_json({**kwargs}),
  1019. )
  1020. ls_utils.raise_for_status_with_text(resp)
  1021. @ls_beta.warn_beta
  1022. async def similar_examples(
  1023. self,
  1024. inputs: dict,
  1025. /,
  1026. *,
  1027. limit: int,
  1028. dataset_id: ls_client.ID_TYPE,
  1029. filter: Optional[str] = None,
  1030. **kwargs: Any,
  1031. ) -> list[ls_schemas.ExampleSearch]:
  1032. r"""Retrieve the dataset examples whose inputs best match the current inputs.
  1033. !!! note
  1034. Must have few-shot indexing enabled for the dataset. See `client.index_dataset()`.
  1035. Args:
  1036. inputs (dict): The inputs to use as a search query. Must match the dataset
  1037. input schema. Must be JSON serializable.
  1038. limit (int): The maximum number of examples to return.
  1039. dataset_id (str or UUID): The ID of the dataset to search over.
  1040. filter (str, optional): A filter string to apply to the search results. Uses
  1041. the same syntax as the `filter` parameter in `list_runs()`. Only a subset
  1042. of operations are supported. Defaults to None.
  1043. kwargs (Any): Additional keyword args to pass as part of request body.
  1044. Returns:
  1045. List of ExampleSearch objects.
  1046. Examples:
  1047. ```python
  1048. from langsmith import Client
  1049. client = Client()
  1050. await client.similar_examples(
  1051. {"question": "When would i use the runnable generator"},
  1052. limit=3,
  1053. dataset_id="...",
  1054. )
  1055. ```
  1056. ```python
  1057. [
  1058. ExampleSearch(
  1059. inputs={
  1060. "question": "How do I cache a Chat model? What caches can I use?"
  1061. },
  1062. outputs={
  1063. "answer": "You can use LangChain's caching layer for Chat Models. This can save you money by reducing the number of API calls you make to the LLM provider, if you're often requesting the same completion multiple times, and speed up your application.\n\n```python\n\nfrom langchain.cache import InMemoryCache\nlangchain.llm_cache = InMemoryCache()\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict('Tell me a joke')\n\n```\n\nYou can also use SQLite Cache which uses a SQLite database:\n\n```python\n rm .langchain.db\n\nfrom langchain.cache import SQLiteCache\nlangchain.llm_cache = SQLiteCache(database_path=\".langchain.db\")\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict('Tell me a joke') \n```\n"
  1064. },
  1065. metadata=None,
  1066. id=UUID("b2ddd1c4-dff6-49ae-8544-f48e39053398"),
  1067. dataset_id=UUID("01b6ce0f-bfb6-4f48-bbb8-f19272135d40"),
  1068. ),
  1069. ExampleSearch(
  1070. inputs={"question": "What's a runnable lambda?"},
  1071. outputs={
  1072. "answer": "A runnable lambda is an object that implements LangChain's `Runnable` interface and runs a callbale (i.e., a function). Note the function must accept a single argument."
  1073. },
  1074. metadata=None,
  1075. id=UUID("f94104a7-2434-4ba7-8293-6a283f4860b4"),
  1076. dataset_id=UUID("01b6ce0f-bfb6-4f48-bbb8-f19272135d40"),
  1077. ),
  1078. ExampleSearch(
  1079. inputs={"question": "Show me how to use RecursiveURLLoader"},
  1080. outputs={
  1081. "answer": 'The RecursiveURLLoader comes from the langchain.document_loaders.recursive_url_loader module. Here\'s an example of how to use it:\n\n```python\nfrom langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader\n\n# Create an instance of RecursiveUrlLoader with the URL you want to load\nloader = RecursiveUrlLoader(url="https://example.com")\n\n# Load all child links from the URL page\nchild_links = loader.load()\n\n# Print the child links\nfor link in child_links:\n print(link)\n```\n\nMake sure to replace "https://example.com" with the actual URL you want to load. The load() method returns a list of child links found on the URL page. You can iterate over this list to access each child link.'
  1082. },
  1083. metadata=None,
  1084. id=UUID("0308ea70-a803-4181-a37d-39e95f138f8c"),
  1085. dataset_id=UUID("01b6ce0f-bfb6-4f48-bbb8-f19272135d40"),
  1086. ),
  1087. ]
  1088. ```
  1089. """ # noqa: E501
  1090. dataset_id = ls_client._as_uuid(dataset_id, "dataset_id")
  1091. req = {
  1092. "inputs": inputs,
  1093. "limit": limit,
  1094. **kwargs,
  1095. }
  1096. if filter:
  1097. req["filter"] = filter
  1098. resp = await self._arequest_with_retries(
  1099. "POST",
  1100. f"/datasets/{dataset_id}/search",
  1101. content=ls_client._dumps_json(req),
  1102. )
  1103. ls_utils.raise_for_status_with_text(resp)
  1104. examples = []
  1105. for ex in resp.json()["examples"]:
  1106. examples.append(ls_schemas.ExampleSearch(**ex, dataset_id=dataset_id))
  1107. return examples
async def _get_settings(self) -> ls_schemas.LangSmithSettings:
    """Get the settings for the current tenant.

    The result is cached on the client after the first successful request;
    later calls return the cached value without hitting the API.

    Returns:
        LangSmithSettings: The settings for the current tenant.
    """
    if self._settings is None:
        response = await self._arequest_with_retries("GET", "/settings")
        ls_utils.raise_for_status_with_text(response)
        self._settings = ls_schemas.LangSmithSettings(**response.json())
    return self._settings
  1118. async def _current_tenant_is_owner(self, owner: str) -> bool:
  1119. """Check if the current workspace has the same handle as owner.
  1120. Args:
  1121. owner (str): The owner to check against.
  1122. Returns:
  1123. bool: True if the current tenant is the owner, False otherwise.
  1124. """
  1125. settings = await self._get_settings()
  1126. return owner == "-" or settings.tenant_handle == owner
async def _owner_conflict_error(
    self, action: str, owner: str
) -> ls_utils.LangSmithUserError:
    """Build (not raise) an error for an attempted cross-tenant operation.

    Args:
        action (str): Human-readable description of the attempted action,
            interpolated into the error message.
        owner (str): The tenant handle the caller tried to act on.

    Returns:
        LangSmithUserError: The constructed error, for the caller to raise.
    """
    settings = await self._get_settings()
    return ls_utils.LangSmithUserError(
        f"Cannot {action} for another tenant.\n"
        f"Current tenant: {settings.tenant_handle},\n"
        f"Requested tenant: {owner}"
    )
  1136. async def _get_latest_commit_hash(
  1137. self, prompt_owner_and_name: str, limit: int = 1, offset: int = 0
  1138. ) -> Optional[str]:
  1139. """Get the latest commit hash for a prompt.
  1140. Args:
  1141. prompt_owner_and_name (str): The owner and name of the prompt.
  1142. limit (int, default=1): The maximum number of commits to fetch. Defaults to 1.
  1143. offset (int, default=0): The number of commits to skip. Defaults to 0.
  1144. Returns:
  1145. Optional[str]: The latest commit hash, or None if no commits are found.
  1146. """
  1147. response = await self._arequest_with_retries(
  1148. "GET",
  1149. f"/commits/{prompt_owner_and_name}/",
  1150. params={"limit": limit, "offset": offset},
  1151. )
  1152. commits = response.json()["commits"]
  1153. return commits[0]["commit_hash"] if commits else None
  1154. async def _like_or_unlike_prompt(
  1155. self, prompt_identifier: str, like: bool
  1156. ) -> dict[str, int]:
  1157. """Like or unlike a prompt.
  1158. Args:
  1159. prompt_identifier (str): The identifier of the prompt.
  1160. like (bool): True to like the prompt, False to unlike it.
  1161. Returns:
  1162. A dictionary with the key 'likes' and the count of likes as the value.
  1163. Raises:
  1164. requests.exceptions.HTTPError: If the prompt is not found or
  1165. another error occurs.
  1166. """
  1167. owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier)
  1168. response = await self._arequest_with_retries(
  1169. "POST", f"/likes/{owner}/{prompt_name}", json={"like": like}
  1170. )
  1171. response.raise_for_status()
  1172. return response.json()
  1173. async def _get_prompt_url(self, prompt_identifier: str) -> str:
  1174. """Get a URL for a prompt.
  1175. Args:
  1176. prompt_identifier (str): The identifier of the prompt.
  1177. Returns:
  1178. str: The URL for the prompt.
  1179. """
  1180. owner, prompt_name, commit_hash = ls_utils.parse_prompt_identifier(
  1181. prompt_identifier
  1182. )
  1183. if not self._current_tenant_is_owner(owner):
  1184. return f"{self._host_url}/hub/{owner}/{prompt_name}:{commit_hash[:8]}"
  1185. settings = await self._get_settings()
  1186. return (
  1187. f"{self._host_url}/prompts/{prompt_name}/{commit_hash[:8]}"
  1188. f"?organizationId={settings.id}"
  1189. )
  1190. async def _prompt_exists(self, prompt_identifier: str) -> bool:
  1191. """Check if a prompt exists.
  1192. Args:
  1193. prompt_identifier (str): The identifier of the prompt.
  1194. Returns:
  1195. bool: True if the prompt exists, False otherwise.
  1196. """
  1197. prompt = await self.get_prompt(prompt_identifier)
  1198. return True if prompt else False
  1199. async def like_prompt(self, prompt_identifier: str) -> dict[str, int]:
  1200. """Like a prompt.
  1201. Args:
  1202. prompt_identifier (str): The identifier of the prompt.
  1203. Returns:
  1204. Dict[str, int]: A dictionary with the key 'likes' and the count of likes as the value.
  1205. """
  1206. return await self._like_or_unlike_prompt(prompt_identifier, like=True)
  1207. async def unlike_prompt(self, prompt_identifier: str) -> dict[str, int]:
  1208. """Unlike a prompt.
  1209. Args:
  1210. prompt_identifier (str): The identifier of the prompt.
  1211. Returns:
  1212. Dict[str, int]: A dictionary with the key 'likes' and the count of likes as the value.
  1213. """
  1214. return await self._like_or_unlike_prompt(prompt_identifier, like=False)
  1215. async def list_prompts(
  1216. self,
  1217. *,
  1218. limit: int = 100,
  1219. offset: int = 0,
  1220. is_public: Optional[bool] = None,
  1221. is_archived: Optional[bool] = False,
  1222. sort_field: ls_schemas.PromptSortField = ls_schemas.PromptSortField.updated_at,
  1223. sort_direction: Literal["desc", "asc"] = "desc",
  1224. query: Optional[str] = None,
  1225. ) -> ls_schemas.ListPromptsResponse:
  1226. """List prompts with pagination.
  1227. Args:
  1228. limit (int, default=100): The maximum number of prompts to return. Defaults to 100.
  1229. offset (int, default=0): The number of prompts to skip. Defaults to 0.
  1230. is_public (Optional[bool]): Filter prompts by if they are public.
  1231. is_archived (Optional[bool]): Filter prompts by if they are archived.
  1232. sort_field (PromptSortField): The field to sort by.
  1233. Defaults to "updated_at".
  1234. sort_direction (Literal["desc", "asc"], default="desc"): The order to sort by.
  1235. Defaults to "desc".
  1236. query (Optional[str]): Filter prompts by a search query.
  1237. Returns:
  1238. ListPromptsResponse: A response object containing
  1239. the list of prompts.
  1240. """
  1241. params = {
  1242. "limit": limit,
  1243. "offset": offset,
  1244. "is_public": (
  1245. "true" if is_public else "false" if is_public is not None else None
  1246. ),
  1247. "is_archived": "true" if is_archived else "false",
  1248. "sort_field": (
  1249. sort_field.value
  1250. if isinstance(sort_field, ls_schemas.PromptSortField)
  1251. else sort_field
  1252. ),
  1253. "sort_direction": sort_direction,
  1254. "query": query,
  1255. "match_prefix": "true" if query else None,
  1256. }
  1257. response = await self._arequest_with_retries(
  1258. "GET", "/repos/", params=_exclude_none(params)
  1259. )
  1260. return ls_schemas.ListPromptsResponse(**response.json())
  1261. async def get_prompt(self, prompt_identifier: str) -> Optional[ls_schemas.Prompt]:
  1262. """Get a specific prompt by its identifier.
  1263. Args:
  1264. prompt_identifier (str): The identifier of the prompt.
  1265. The identifier should be in the format "prompt_name" or "owner/prompt_name".
  1266. Returns:
  1267. Optional[Prompt]: The prompt object.
  1268. Raises:
  1269. requests.exceptions.HTTPError: If the prompt is not found or
  1270. another error occurs.
  1271. """
  1272. owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier)
  1273. try:
  1274. response = await self._arequest_with_retries(
  1275. "GET", f"/repos/{owner}/{prompt_name}"
  1276. )
  1277. return ls_schemas.Prompt(**response.json()["repo"])
  1278. except ls_utils.LangSmithNotFoundError:
  1279. return None
async def create_prompt(
    self,
    prompt_identifier: str,
    *,
    description: Optional[str] = None,
    readme: Optional[str] = None,
    tags: Optional[Sequence[str]] = None,
    is_public: bool = False,
) -> ls_schemas.Prompt:
    """Create a new prompt.

    Does not attach prompt object, just creates an empty prompt.

    Args:
        prompt_identifier (str): The identifier of the prompt.
            The identifier should be in the format of owner/name:hash,
            name:hash, owner/name, or name.
        description (Optional[str]): A description of the prompt.
        readme (Optional[str]): A readme for the prompt.
        tags (Optional[Sequence[str]]): A list of tags for the prompt.
        is_public (bool): Whether the prompt should be public. Defaults to False.

    Returns:
        Prompt: The created prompt object.

    Raises:
        ValueError: If the current tenant is not the owner.
        HTTPError: If the server request fails.
    """
    settings = await self._get_settings()
    # Public prompts require a hub handle on the tenant.
    if is_public and not settings.tenant_handle:
        raise ls_utils.LangSmithUserError(
            "Cannot create a public prompt without first\n"
            "creating a LangChain Hub handle. "
            "You can add a handle by creating a public prompt at:\n"
            "https://smith.langchain.com/prompts"
        )
    owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier)
    # Prompts can only be created within the caller's own tenant.
    if not (await self._current_tenant_is_owner(owner=owner)):
        raise (await self._owner_conflict_error("create a prompt", owner))
    json: dict[str, Union[str, bool, Sequence[str]]] = {
        "repo_handle": prompt_name,
        "description": description or "",
        "readme": readme or "",
        "tags": tags or [],
        "is_public": is_public,
    }
    response = await self._arequest_with_retries("POST", "/repos/", json=json)
    response.raise_for_status()
    return ls_schemas.Prompt(**response.json()["repo"])
  1325. async def create_commit(
  1326. self,
  1327. prompt_identifier: str,
  1328. object: Any,
  1329. *,
  1330. parent_commit_hash: Optional[str] = None,
  1331. ) -> str:
  1332. """Create a commit for an existing prompt.
  1333. Args:
  1334. prompt_identifier (str): The identifier of the prompt.
  1335. object (Any): The LangChain object to commit.
  1336. parent_commit_hash (Optional[str]): The hash of the parent commit.
  1337. Defaults to latest commit.
  1338. Returns:
  1339. str: The url of the prompt commit.
  1340. Raises:
  1341. HTTPError: If the server request fails.
  1342. ValueError: If the prompt does not exist.
  1343. """
  1344. if not (await self._prompt_exists(prompt_identifier)):
  1345. raise ls_utils.LangSmithNotFoundError(
  1346. "Prompt does not exist, you must create it first."
  1347. )
  1348. try:
  1349. from langchain_core.load import dumps
  1350. except ImportError:
  1351. raise ImportError(
  1352. "The client.create_commit function requires the langchain-core"
  1353. "package to run.\nInstall with `pip install langchain-core`"
  1354. )
  1355. chain_to_push = ls_client.prep_obj_for_push(object)
  1356. json_object = dumps(chain_to_push)
  1357. manifest_dict = json.loads(json_object)
  1358. owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier)
  1359. prompt_owner_and_name = f"{owner}/{prompt_name}"
  1360. if parent_commit_hash == "latest" or parent_commit_hash is None:
  1361. parent_commit_hash = await self._get_latest_commit_hash(
  1362. prompt_owner_and_name
  1363. )
  1364. request_dict = {"parent_commit": parent_commit_hash, "manifest": manifest_dict}
  1365. response = await self._arequest_with_retries(
  1366. "POST", f"/commits/{prompt_owner_and_name}", json=request_dict
  1367. )
  1368. commit_hash = response.json()["commit"]["commit_hash"]
  1369. return await self._get_prompt_url(f"{prompt_owner_and_name}:{commit_hash}")
  1370. async def update_prompt(
  1371. self,
  1372. prompt_identifier: str,
  1373. *,
  1374. description: Optional[str] = None,
  1375. readme: Optional[str] = None,
  1376. tags: Optional[Sequence[str]] = None,
  1377. is_public: Optional[bool] = None,
  1378. is_archived: Optional[bool] = None,
  1379. ) -> dict[str, Any]:
  1380. """Update a prompt's metadata.
  1381. To update the content of a prompt, use push_prompt or create_commit instead.
  1382. Args:
  1383. prompt_identifier (str): The identifier of the prompt to update.
  1384. description (Optional[str]): New description for the prompt.
  1385. readme (Optional[str]): New readme for the prompt.
  1386. tags (Optional[Sequence[str]]): New list of tags for the prompt.
  1387. is_public (Optional[bool]): New public status for the prompt.
  1388. is_archived (Optional[bool]): New archived status for the prompt.
  1389. Returns:
  1390. Dict[str, Any]: The updated prompt data as returned by the server.
  1391. Raises:
  1392. ValueError: If the prompt_identifier is empty.
  1393. HTTPError: If the server request fails.
  1394. """
  1395. settings = await self._get_settings()
  1396. if is_public and not settings.tenant_handle:
  1397. raise ValueError(
  1398. "Cannot create a public prompt without first\n"
  1399. "creating a LangChain Hub handle. "
  1400. "You can add a handle by creating a public prompt at:\n"
  1401. "https://smith.langchain.com/prompts"
  1402. )
  1403. json: dict[str, Union[str, bool, Sequence[str]]] = {}
  1404. if description is not None:
  1405. json["description"] = description
  1406. if readme is not None:
  1407. json["readme"] = readme
  1408. if is_public is not None:
  1409. json["is_public"] = is_public
  1410. if is_archived is not None:
  1411. json["is_archived"] = is_archived
  1412. if tags is not None:
  1413. json["tags"] = tags
  1414. owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier)
  1415. response = await self._arequest_with_retries(
  1416. "PATCH", f"/repos/{owner}/{prompt_name}", json=json
  1417. )
  1418. response.raise_for_status()
  1419. return response.json()
async def delete_prompt(self, prompt_identifier: str) -> None:
    """Delete a prompt.

    Args:
        prompt_identifier (str): The identifier of the prompt to delete.

    Returns:
        None

    Raises:
        ValueError: If the current tenant is not the owner of the prompt.
    """
    owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier)
    # Prompts can only be deleted by the tenant that owns them.
    if not (await self._current_tenant_is_owner(owner)):
        raise (await self._owner_conflict_error("delete a prompt", owner))
    response = await self._arequest_with_retries(
        "DELETE", f"/repos/{owner}/{prompt_name}"
    )
    response.raise_for_status()
async def pull_prompt_commit(
    self,
    prompt_identifier: str,
    *,
    include_model: Optional[bool] = False,
) -> ls_schemas.PromptCommit:
    """Pull a prompt object from the LangSmith API.

    Args:
        prompt_identifier (str): The identifier of the prompt.
        include_model (Optional[bool]): Whether to also fetch the model
            bound to the prompt manifest. Defaults to False.

    Returns:
        PromptCommit: The prompt object.

    Raises:
        ValueError: If no commits are found for the prompt.
    """
    owner, prompt_name, commit_hash = ls_utils.parse_prompt_identifier(
        prompt_identifier
    )
    response = await self._arequest_with_retries(
        "GET",
        (
            f"/commits/{owner}/{prompt_name}/{commit_hash}"
            f"{'?include_model=true' if include_model else ''}"
        ),
    )
    # The API response lacks owner/repo, so splice them into the payload.
    return ls_schemas.PromptCommit(
        **{"owner": owner, "repo": prompt_name, **response.json()}
    )
  1463. async def list_prompt_commits(
  1464. self,
  1465. prompt_identifier: str,
  1466. *,
  1467. limit: Optional[int] = None,
  1468. offset: int = 0,
  1469. include_model: bool = False,
  1470. ) -> AsyncGenerator[ls_schemas.ListedPromptCommit, None]:
  1471. """List commits for a given prompt.
  1472. Args:
  1473. prompt_identifier (str): The identifier of the prompt in the format 'owner/repo_name'.
  1474. limit (Optional[int]): The maximum number of commits to return. If None, returns all commits. Defaults to None.
  1475. offset (int, default=0): The number of commits to skip before starting to return results. Defaults to 0.
  1476. include_model (bool, default=False): Whether to include the model information in the commit data. Defaults to False.
  1477. Yields:
  1478. A ListedPromptCommit object for each commit.
  1479. !!! note
  1480. This method uses pagination to retrieve commits. It will make multiple API calls if necessary to retrieve all commits
  1481. or up to the specified limit.
  1482. """
  1483. owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier)
  1484. params = {
  1485. "limit": min(100, limit) if limit is not None else limit,
  1486. "offset": offset,
  1487. "include_model": include_model,
  1488. }
  1489. i = 0
  1490. while True:
  1491. params["offset"] = offset
  1492. response = await self._arequest_with_retries(
  1493. "GET",
  1494. f"/commits/{owner}/{prompt_name}/",
  1495. params=params,
  1496. )
  1497. val = response.json()
  1498. items = val["commits"]
  1499. total = val["total"]
  1500. if not items:
  1501. break
  1502. for it in items:
  1503. if limit is not None and i >= limit:
  1504. return # Stop iteration if we've reached the limit
  1505. yield ls_schemas.ListedPromptCommit(
  1506. **{"owner": owner, "repo": prompt_name, **it}
  1507. )
  1508. i += 1
  1509. offset += len(items)
  1510. if offset >= total:
  1511. break
  1512. async def pull_prompt(
  1513. self, prompt_identifier: str, *, include_model: Optional[bool] = False
  1514. ) -> Any:
  1515. """Pull a prompt and return it as a LangChain `PromptTemplate`.
  1516. This method requires [`langchain-core`](https://pypi.org/project/langchain-core).
  1517. Args:
  1518. prompt_identifier: The identifier of the prompt.
  1519. include_model: Whether to include the model information in the prompt data.
  1520. Returns:
  1521. Any: The prompt object in the specified format.
  1522. """
  1523. try:
  1524. from langchain_core.language_models.base import BaseLanguageModel
  1525. from langchain_core.load.load import loads
  1526. from langchain_core.output_parsers import BaseOutputParser
  1527. from langchain_core.prompts import BasePromptTemplate
  1528. from langchain_core.prompts.structured import StructuredPrompt
  1529. from langchain_core.runnables.base import RunnableBinding, RunnableSequence
  1530. except ImportError:
  1531. raise ImportError(
  1532. "The client.pull_prompt function requires the langchain-core"
  1533. "package to run.\nInstall with `pip install langchain-core`"
  1534. )
  1535. try:
  1536. from langchain_core._api import suppress_langchain_beta_warning
  1537. except ImportError:
  1538. @contextlib.contextmanager
  1539. def suppress_langchain_beta_warning():
  1540. yield
  1541. prompt_object = await self.pull_prompt_commit(
  1542. prompt_identifier, include_model=include_model
  1543. )
  1544. with suppress_langchain_beta_warning():
  1545. prompt = loads(json.dumps(prompt_object.manifest))
  1546. if (
  1547. isinstance(prompt, BasePromptTemplate)
  1548. or isinstance(prompt, RunnableSequence)
  1549. and isinstance(prompt.first, BasePromptTemplate)
  1550. ):
  1551. prompt_template = (
  1552. prompt
  1553. if isinstance(prompt, BasePromptTemplate)
  1554. else (
  1555. prompt.first
  1556. if isinstance(prompt, RunnableSequence)
  1557. and isinstance(prompt.first, BasePromptTemplate)
  1558. else None
  1559. )
  1560. )
  1561. if prompt_template is None:
  1562. raise ls_utils.LangSmithError(
  1563. "Prompt object is not a valid prompt template."
  1564. )
  1565. if prompt_template.metadata is None:
  1566. prompt_template.metadata = {}
  1567. prompt_template.metadata.update(
  1568. {
  1569. "lc_hub_owner": prompt_object.owner,
  1570. "lc_hub_repo": prompt_object.repo,
  1571. "lc_hub_commit_hash": prompt_object.commit_hash,
  1572. }
  1573. )
  1574. # Transform 2-step RunnableSequence to 3-step for structured prompts
  1575. # See create_commit for the reverse transformation when pushing a prompt
  1576. if (
  1577. include_model
  1578. and isinstance(prompt, RunnableSequence)
  1579. and isinstance(prompt.first, StructuredPrompt)
  1580. # Make forward-compatible in case we let update the response type
  1581. and (
  1582. len(prompt.steps) == 2 and not isinstance(prompt.last, BaseOutputParser)
  1583. )
  1584. ):
  1585. if isinstance(prompt.last, RunnableBinding) and isinstance(
  1586. prompt.last.bound, BaseLanguageModel
  1587. ):
  1588. seq = cast(RunnableSequence, prompt.first | prompt.last.bound)
  1589. if len(seq.steps) == 3: # prompt | bound llm | output parser
  1590. rebound_llm = seq.steps[1]
  1591. prompt = RunnableSequence(
  1592. prompt.first,
  1593. rebound_llm.bind(**{**prompt.last.kwargs}),
  1594. seq.last,
  1595. )
  1596. else:
  1597. prompt = seq # Not sure
  1598. elif isinstance(prompt.last, BaseLanguageModel):
  1599. prompt: RunnableSequence = prompt.first | prompt.last # type: ignore[no-redef, assignment]
  1600. else:
  1601. pass
  1602. return prompt
  1603. async def push_prompt(
  1604. self,
  1605. prompt_identifier: str,
  1606. *,
  1607. object: Optional[Any] = None,
  1608. parent_commit_hash: str = "latest",
  1609. is_public: Optional[bool] = None,
  1610. description: Optional[str] = None,
  1611. readme: Optional[str] = None,
  1612. tags: Optional[Sequence[str]] = None,
  1613. ) -> str:
  1614. """Push a prompt to the LangSmith API.
  1615. Can be used to update prompt metadata or prompt content.
  1616. If the prompt does not exist, it will be created.
  1617. If the prompt exists, it will be updated.
  1618. Args:
  1619. prompt_identifier (str): The identifier of the prompt.
  1620. object (Optional[Any]): The LangChain object to push.
  1621. parent_commit_hash (str): The parent commit hash.
  1622. Defaults to "latest".
  1623. is_public (Optional[bool]): Whether the prompt should be public.
  1624. If None (default), the current visibility status is maintained for existing prompts.
  1625. For new prompts, None defaults to private.
  1626. Set to True to make public, or False to make private.
  1627. description (Optional[str]): A description of the prompt.
  1628. Defaults to an empty string.
  1629. readme (Optional[str]): A readme for the prompt.
  1630. Defaults to an empty string.
  1631. tags (Optional[Sequence[str]]): A list of tags for the prompt.
  1632. Defaults to an empty list.
  1633. Returns:
  1634. str: The URL of the prompt.
  1635. """
  1636. # Create or update prompt metadata
  1637. if await self._prompt_exists(prompt_identifier):
  1638. if any(
  1639. param is not None for param in [is_public, description, readme, tags]
  1640. ):
  1641. await self.update_prompt(
  1642. prompt_identifier,
  1643. description=description,
  1644. readme=readme,
  1645. tags=tags,
  1646. is_public=is_public,
  1647. )
  1648. else:
  1649. await self.create_prompt(
  1650. prompt_identifier,
  1651. is_public=is_public if is_public is not None else False,
  1652. description=description,
  1653. readme=readme,
  1654. tags=tags,
  1655. )
  1656. if object is None:
  1657. return await self._get_prompt_url(prompt_identifier=prompt_identifier)
  1658. # Create a commit with the new manifest
  1659. url = await self.create_commit(
  1660. prompt_identifier,
  1661. object,
  1662. parent_commit_hash=parent_commit_hash,
  1663. )
  1664. return url
  1665. def _exclude_none(d: dict) -> dict:
  1666. """Exclude None values from a dictionary."""
  1667. return {k: v for k, v in d.items() if v is not None}