client.py 251 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
767786779678067816782678367846785678667876788678967906791679267936794679567966797679867996800680168026803680468056806680768086809681068116812681368146815681668176818681968206821682268236824682568266827682868296830683168326833683468356836683768386839684068416842684368446845684668476848684968506851685268536854685568566857685868596860686168626863686468656866686768686869687068716872687368746875687668776878687968806881688268836884688568866887
  1. """The LangGraph client implementations connect to the LangGraph API.
  2. This module provides both asynchronous (`get_client(url="http://localhost:2024")` or
  3. `LangGraphClient`) and synchronous (`get_sync_client(url="http://localhost:2024")` or
`SyncLangGraphClient`) clients for interacting with the LangGraph API's core resources
  5. such as Assistants, Threads, Runs, and Cron jobs, as well as its persistent document
  6. Store.
  7. """
  8. from __future__ import annotations
  9. import asyncio
  10. import functools
  11. import logging
  12. import os
  13. import re
  14. import sys
  15. import warnings
  16. from collections.abc import AsyncIterator, Callable, Iterator, Mapping, Sequence
  17. from types import TracebackType
  18. from typing import (
  19. Any,
  20. Literal,
  21. cast,
  22. overload,
  23. )
  24. import httpx
  25. import orjson
  26. import langgraph_sdk
  27. from langgraph_sdk.errors import _araise_for_status_typed, _raise_for_status_typed
  28. from langgraph_sdk.schema import (
  29. All,
  30. Assistant,
  31. AssistantSelectField,
  32. AssistantSortBy,
  33. AssistantsSearchResponse,
  34. AssistantVersion,
  35. CancelAction,
  36. Checkpoint,
  37. Command,
  38. Config,
  39. Context,
  40. Cron,
  41. CronSelectField,
  42. CronSortBy,
  43. DisconnectMode,
  44. Durability,
  45. GraphSchema,
  46. IfNotExists,
  47. Input,
  48. Item,
  49. Json,
  50. ListNamespaceResponse,
  51. MultitaskStrategy,
  52. OnCompletionBehavior,
  53. OnConflictBehavior,
  54. QueryParamTypes,
  55. Run,
  56. RunCreate,
  57. RunCreateMetadata,
  58. RunSelectField,
  59. RunStatus,
  60. SearchItemsResponse,
  61. SortOrder,
  62. StreamMode,
  63. StreamPart,
  64. Subgraphs,
  65. Thread,
  66. ThreadSelectField,
  67. ThreadSortBy,
  68. ThreadState,
  69. ThreadStatus,
  70. ThreadStreamMode,
  71. ThreadUpdateStateResponse,
  72. )
  73. from langgraph_sdk.sse import SSEDecoder, aiter_lines_raw, iter_lines_raw
# Module-level logger for the SDK client.
logger = logging.getLogger(__name__)
# Header names callers may not set via custom headers; the SDK populates
# "x-api-key" itself from the resolved API key (see _get_headers).
RESERVED_HEADERS = ("x-api-key",)
  76. NOT_PROVIDED = cast(None, object())
  77. def _get_api_key(api_key: str | None = NOT_PROVIDED) -> str | None:
  78. """Get the API key from the environment.
  79. Precedence:
  80. 1. explicit string argument
  81. 2. LANGGRAPH_API_KEY (if api_key not provided)
  82. 3. LANGSMITH_API_KEY (if api_key not provided)
  83. 4. LANGCHAIN_API_KEY (if api_key not provided)
  84. Args:
  85. api_key: The API key to use. Can be:
  86. - A string: use this exact API key
  87. - None: explicitly skip loading from environment
  88. - NOT_PROVIDED (default): auto-load from environment variables
  89. """
  90. if isinstance(api_key, str):
  91. return api_key
  92. if api_key is NOT_PROVIDED:
  93. # api_key is not explicitly provided, try to load from environment
  94. for prefix in ["LANGGRAPH", "LANGSMITH", "LANGCHAIN"]:
  95. if env := os.getenv(f"{prefix}_API_KEY"):
  96. return env.strip().strip('"').strip("'")
  97. # api_key is explicitly None, don't load from environment
  98. return None
  99. def _get_headers(
  100. api_key: str | None,
  101. custom_headers: Mapping[str, str] | None,
  102. ) -> dict[str, str]:
  103. """Combine api_key and custom user-provided headers."""
  104. custom_headers = custom_headers or {}
  105. for header in RESERVED_HEADERS:
  106. if header in custom_headers:
  107. raise ValueError(f"Cannot set reserved header '{header}'")
  108. headers = {
  109. "User-Agent": f"langgraph-sdk-py/{langgraph_sdk.__version__}",
  110. **custom_headers,
  111. }
  112. resolved_api_key = _get_api_key(api_key)
  113. if resolved_api_key:
  114. headers["x-api-key"] = resolved_api_key
  115. return headers
  116. def _orjson_default(obj: Any) -> Any:
  117. is_class = isinstance(obj, type)
  118. if hasattr(obj, "model_dump") and callable(obj.model_dump):
  119. if is_class:
  120. raise TypeError(
  121. f"Cannot JSON-serialize type object: {obj!r}. Did you mean to pass an instance of the object instead?"
  122. f"\nReceived type: {obj!r}"
  123. )
  124. return obj.model_dump()
  125. elif hasattr(obj, "dict") and callable(obj.dict):
  126. if is_class:
  127. raise TypeError(
  128. f"Cannot JSON-serialize type object: {obj!r}. Did you mean to pass an instance of the object instead?"
  129. f"\nReceived type: {obj!r}"
  130. )
  131. return obj.dict()
  132. elif isinstance(obj, (set, frozenset)):
  133. return list(obj)
  134. else:
  135. raise TypeError(f"Object of type {type(obj)} is not JSON serializable")
# Compiled regex pattern for extracting run metadata from Content-Location header.
# Matches "/runs/<run_id>" optionally preceded by "/threads/<thread_id>"; the
# thread segment is a named optional group, so thread_id may be absent.
_RUN_METADATA_PATTERN = re.compile(
    r"(\/threads\/(?P<thread_id>.+))?\/runs\/(?P<run_id>.+)"
)
  140. def _get_run_metadata_from_response(
  141. response: httpx.Response,
  142. ) -> RunCreateMetadata | None:
  143. """Extract run metadata from the response headers."""
  144. if (content_location := response.headers.get("Content-Location")) and (
  145. match := _RUN_METADATA_PATTERN.search(content_location)
  146. ):
  147. return RunCreateMetadata(
  148. run_id=match.group("run_id"),
  149. thread_id=match.group("thread_id") or None,
  150. )
  151. return None
  152. def get_client(
  153. *,
  154. url: str | None = None,
  155. api_key: str | None = NOT_PROVIDED,
  156. headers: Mapping[str, str] | None = None,
  157. timeout: TimeoutTypes | None = None,
  158. ) -> LangGraphClient:
  159. """Create and configure a LangGraphClient.
  160. The client provides programmatic access to LangSmith Deployment. It supports
  161. both remote servers and local in-process connections (when running inside a LangGraph server).
  162. Args:
  163. url:
  164. Base URL of the LangGraph API.
  165. - If `None`, the client first attempts an in-process connection via ASGI transport.
  166. If that fails, it defers registration until after app initialization. This
  167. only works if the client is used from within the Agent server.
  168. api_key:
  169. API key for authentication. Can be:
  170. - A string: use this exact API key
  171. - `None`: explicitly skip loading from environment variables
  172. - Not provided (default): auto-load from environment in this order:
  173. 1. `LANGGRAPH_API_KEY`
  174. 2. `LANGSMITH_API_KEY`
  175. 3. `LANGCHAIN_API_KEY`
  176. headers:
  177. Additional HTTP headers to include in requests. Merged with authentication headers.
  178. timeout:
  179. HTTP timeout configuration. May be:
  180. - `httpx.Timeout` instance
  181. - float (total seconds)
  182. - tuple `(connect, read, write, pool)` in seconds
  183. Defaults: connect=5, read=300, write=300, pool=5.
  184. Returns:
  185. LangGraphClient:
  186. A top-level client exposing sub-clients for assistants, threads,
  187. runs, and cron operations.
  188. ???+ example "Connect to a remote server:"
  189. ```python
  190. from langgraph_sdk import get_client
  191. # get top-level LangGraphClient
  192. client = get_client(url="http://localhost:8123")
  193. # example usage: client.<model>.<method_name>()
  194. assistants = await client.assistants.get(assistant_id="some_uuid")
  195. ```
  196. ???+ example "Connect in-process to a running LangGraph server:"
  197. ```python
  198. from langgraph_sdk import get_client
  199. client = get_client(url=None)
  200. async def my_node(...):
  201. subagent_result = await client.runs.wait(
  202. thread_id=None,
  203. assistant_id="agent",
  204. input={"messages": [{"role": "user", "content": "Foo"}]},
  205. )
  206. ```
  207. ???+ example "Skip auto-loading API key from environment:"
  208. ```python
  209. from langgraph_sdk import get_client
  210. # Don't load API key from environment variables
  211. client = get_client(
  212. url="http://localhost:8123",
  213. api_key=None
  214. )
  215. ```
  216. """
  217. transport: httpx.AsyncBaseTransport | None = None
  218. if url is None:
  219. url = "http://api"
  220. if os.environ.get("__LANGGRAPH_DEFER_LOOPBACK_TRANSPORT") == "true":
  221. transport = get_asgi_transport()(app=None, root_path="/noauth")
  222. _registered_transports.append(transport)
  223. else:
  224. try:
  225. from langgraph_api.server import app # type: ignore
  226. transport = get_asgi_transport()(app, root_path="/noauth")
  227. except Exception:
  228. logger.debug(
  229. "Failed to connect to in-process LangGraph server. Deferring configuration.",
  230. exc_info=True,
  231. )
  232. transport = get_asgi_transport()(app=None, root_path="/noauth")
  233. _registered_transports.append(transport)
  234. if transport is None:
  235. transport = httpx.AsyncHTTPTransport(retries=5)
  236. client = httpx.AsyncClient(
  237. base_url=url,
  238. transport=transport,
  239. timeout=(
  240. httpx.Timeout(timeout) # ty: ignore[invalid-argument-type]
  241. if timeout is not None
  242. else httpx.Timeout(connect=5, read=300, write=300, pool=5)
  243. ),
  244. headers=_get_headers(api_key, headers),
  245. )
  246. return LangGraphClient(client)
  247. class LangGraphClient:
  248. """Top-level client for LangGraph API.
  249. Attributes:
  250. assistants: Manages versioned configuration for your graphs.
  251. threads: Handles (potentially) multi-turn interactions, such as conversational threads.
  252. runs: Controls individual invocations of the graph.
  253. crons: Manages scheduled operations.
  254. store: Interfaces with persistent, shared data storage.
  255. """
  256. def __init__(self, client: httpx.AsyncClient) -> None:
  257. self.http = HttpClient(client)
  258. self.assistants = AssistantsClient(self.http)
  259. self.threads = ThreadsClient(self.http)
  260. self.runs = RunsClient(self.http)
  261. self.crons = CronClient(self.http)
  262. self.store = StoreClient(self.http)
  263. async def __aenter__(self) -> LangGraphClient:
  264. """Enter the async context manager."""
  265. return self
  266. async def __aexit__(
  267. self,
  268. exc_type: type[BaseException] | None,
  269. exc_val: BaseException | None,
  270. exc_tb: TracebackType | None,
  271. ) -> None:
  272. """Exit the async context manager."""
  273. await self.aclose()
  274. async def aclose(self) -> None:
  275. """Close the underlying HTTP client."""
  276. if hasattr(self, "http"):
  277. await self.http.client.aclose()
class HttpClient:
    """Handle async requests to the LangGraph API.

    Adds additional error messaging & content handling above the
    provided httpx client.

    Attributes:
        client (httpx.AsyncClient): Underlying HTTPX async client.
    """

    def __init__(self, client: httpx.AsyncClient) -> None:
        self.client = client

    async def get(
        self,
        path: str,
        *,
        params: QueryParamTypes | None = None,
        headers: Mapping[str, str] | None = None,
        on_response: Callable[[httpx.Response], None] | None = None,
    ) -> Any:
        """Send a `GET` request.

        Args:
            path: Request path, relative to the client's base URL.
            params: Optional query parameters.
            headers: Optional extra request headers.
            on_response: Optional callback invoked with the raw response
                before the status check.

        Returns:
            The JSON-decoded response body (`None` for an empty body).
        """
        r = await self.client.get(path, params=params, headers=headers)
        if on_response:
            on_response(r)
        await _araise_for_status_typed(r)
        return await _adecode_json(r)

    async def post(
        self,
        path: str,
        *,
        json: dict[str, Any] | list | None,
        params: QueryParamTypes | None = None,
        headers: Mapping[str, str] | None = None,
        on_response: Callable[[httpx.Response], None] | None = None,
    ) -> Any:
        """Send a `POST` request.

        The body is serialized with orjson (see `_aencode_json`); a `None`
        body sends empty content with no JSON headers.
        """
        if json is not None:
            request_headers, content = await _aencode_json(json)
        else:
            request_headers, content = {}, b""
        # Merge headers, with runtime headers taking precedence
        if headers:
            request_headers.update(headers)
        r = await self.client.post(
            path, headers=request_headers, content=content, params=params
        )
        if on_response:
            on_response(r)
        await _araise_for_status_typed(r)
        return await _adecode_json(r)

    async def put(
        self,
        path: str,
        *,
        json: dict,
        params: QueryParamTypes | None = None,
        headers: Mapping[str, str] | None = None,
        on_response: Callable[[httpx.Response], None] | None = None,
    ) -> Any:
        """Send a `PUT` request with an orjson-serialized body."""
        request_headers, content = await _aencode_json(json)
        # Runtime headers take precedence over the generated JSON headers.
        if headers:
            request_headers.update(headers)
        r = await self.client.put(
            path, headers=request_headers, content=content, params=params
        )
        if on_response:
            on_response(r)
        await _araise_for_status_typed(r)
        return await _adecode_json(r)

    async def patch(
        self,
        path: str,
        *,
        json: dict,
        params: QueryParamTypes | None = None,
        headers: Mapping[str, str] | None = None,
        on_response: Callable[[httpx.Response], None] | None = None,
    ) -> Any:
        """Send a `PATCH` request with an orjson-serialized body."""
        request_headers, content = await _aencode_json(json)
        # Runtime headers take precedence over the generated JSON headers.
        if headers:
            request_headers.update(headers)
        r = await self.client.patch(
            path, headers=request_headers, content=content, params=params
        )
        if on_response:
            on_response(r)
        await _araise_for_status_typed(r)
        return await _adecode_json(r)

    async def delete(
        self,
        path: str,
        *,
        json: Any | None = None,
        params: QueryParamTypes | None = None,
        headers: Mapping[str, str] | None = None,
        on_response: Callable[[httpx.Response], None] | None = None,
    ) -> None:
        """Send a `DELETE` request.

        NOTE(review): unlike post/put/patch, serialization here is delegated
        to httpx's built-in `json=` encoding rather than `_aencode_json`,
        so the orjson `default` hooks (pydantic models, sets) do not apply.
        """
        r = await self.client.request(
            "DELETE", path, json=json, params=params, headers=headers
        )
        if on_response:
            on_response(r)
        await _araise_for_status_typed(r)

    async def request_reconnect(
        self,
        path: str,
        method: str,
        *,
        json: dict[str, Any] | None = None,
        params: QueryParamTypes | None = None,
        headers: Mapping[str, str] | None = None,
        on_response: Callable[[httpx.Response], None] | None = None,
        reconnect_limit: int = 5,
    ) -> Any:
        """Send a request that automatically reconnects to Location header.

        If reading the body fails mid-stream and the server supplied a
        `Location` header, the request is retried as a `GET` against that
        location, up to `reconnect_limit` times.
        """
        request_headers, content = await _aencode_json(json)
        if headers:
            request_headers.update(headers)
        async with self.client.stream(
            method, path, headers=request_headers, content=content, params=params
        ) as r:
            if on_response:
                on_response(r)
            try:
                r.raise_for_status()
            except httpx.HTTPStatusError as e:
                # Attach the response body to the error for easier debugging.
                body = (await r.aread()).decode()
                if sys.version_info >= (3, 11):
                    e.add_note(body)
                else:
                    logger.error(f"Error from langgraph-api: {body}", exc_info=e)
                raise e
            loc = r.headers.get("location")
            if reconnect_limit <= 0 or not loc:
                # No reconnects possible/left: decode and let errors propagate.
                return await _adecode_json(r)
            try:
                return await _adecode_json(r)
            except httpx.HTTPError:
                # Body read failed mid-stream; retry against the Location URL.
                warnings.warn(
                    f"Request failed, attempting reconnect to Location: {loc}",
                    stacklevel=2,
                )
                await r.aclose()
                return await self.request_reconnect(
                    loc,
                    "GET",
                    headers=request_headers,
                    # don't pass on_response so it's only called once
                    reconnect_limit=reconnect_limit - 1,
                )

    async def stream(
        self,
        path: str,
        method: str,
        *,
        json: dict[str, Any] | None = None,
        params: QueryParamTypes | None = None,
        headers: Mapping[str, str] | None = None,
        on_response: Callable[[httpx.Response], None] | None = None,
    ) -> AsyncIterator[StreamPart]:
        """Stream results using SSE.

        Yields decoded `StreamPart`s. On transient transport errors, if the
        server supplied a `Location` header, the stream reconnects there
        with a `GET` and `Last-Event-ID` (up to 5 attempts).
        """
        request_headers, content = await _aencode_json(json)
        request_headers["Accept"] = "text/event-stream"
        request_headers["Cache-Control"] = "no-store"
        # Add runtime headers with precedence
        if headers:
            request_headers.update(headers)
        # Reconnect requests are GETs with no body, so drop body headers.
        reconnect_headers = {
            key: value
            for key, value in request_headers.items()
            if key.lower() not in {"content-length", "content-type"}
        }
        last_event_id: str | None = None
        reconnect_path: str | None = None
        reconnect_attempts = 0
        max_reconnect_attempts = 5
        while True:
            # First pass uses the original method/body; reconnect passes
            # switch to GET against the server-provided location.
            current_headers = dict(
                request_headers if reconnect_path is None else reconnect_headers
            )
            if last_event_id is not None:
                # Lets the server resume the stream where we left off.
                current_headers["Last-Event-ID"] = last_event_id
            current_method = method if reconnect_path is None else "GET"
            current_content = content if reconnect_path is None else None
            current_params = params if reconnect_path is None else None
            retry = False
            async with self.client.stream(
                current_method,
                reconnect_path or path,
                headers=current_headers,
                content=current_content,
                params=current_params,
            ) as res:
                # Only surface the initial response to the caller, not reconnects.
                if reconnect_path is None and on_response:
                    on_response(res)
                # check status
                await _araise_for_status_typed(res)
                # check content type
                content_type = res.headers.get("content-type", "").partition(";")[0]
                if "text/event-stream" not in content_type:
                    raise httpx.TransportError(
                        "Expected response header Content-Type to contain 'text/event-stream', "
                        f"got {content_type!r}"
                    )
                # A Location header marks the stream as reconnectable.
                reconnect_location = res.headers.get("location")
                if reconnect_location:
                    reconnect_path = reconnect_location
                # parse SSE
                decoder = SSEDecoder()
                try:
                    async for line in aiter_lines_raw(res):
                        sse = decoder.decode(line=cast("bytes", line).rstrip(b"\n"))
                        if sse is not None:
                            if decoder.last_event_id is not None:
                                last_event_id = decoder.last_event_id
                            if sse.event or sse.data is not None:
                                yield sse
                except httpx.HTTPError:
                    # httpx.TransportError inherits from HTTPError, so transient
                    # disconnects during streaming land here.
                    if reconnect_path is None:
                        raise
                    retry = True
                else:
                    if sse := decoder.decode(b""):
                        if decoder.last_event_id is not None:
                            last_event_id = decoder.last_event_id
                        if sse.event or sse.data is not None:
                            # decoder.decode(b"") flushes the in-flight event and may
                            # return an empty placeholder when there is no pending
                            # message. Skip these no-op events so the stream doesn't
                            # emit a trailing blank item after reconnects.
                            yield sse
            if retry:
                reconnect_attempts += 1
                if reconnect_attempts > max_reconnect_attempts:
                    raise httpx.TransportError(
                        "Exceeded maximum SSE reconnection attempts"
                    )
                continue
            break
  519. async def _aencode_json(json: Any) -> tuple[dict[str, str], bytes | None]:
  520. if json is None:
  521. return {}, None
  522. body = await asyncio.get_running_loop().run_in_executor(
  523. None,
  524. orjson.dumps,
  525. json,
  526. _orjson_default,
  527. orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NON_STR_KEYS,
  528. )
  529. content_length = str(len(body))
  530. content_type = "application/json"
  531. headers = {"Content-Length": content_length, "Content-Type": content_type}
  532. return headers, body
  533. async def _adecode_json(r: httpx.Response) -> Any:
  534. body = await r.aread()
  535. return (
  536. await asyncio.get_running_loop().run_in_executor(None, orjson.loads, body)
  537. if body
  538. else None
  539. )
  540. class AssistantsClient:
  541. """Client for managing assistants in LangGraph.
  542. This class provides methods to interact with assistants,
  543. which are versioned configurations of your graph.
  544. ???+ example "Example"
  545. ```python
  546. client = get_client(url="http://localhost:2024")
  547. assistant = await client.assistants.get("assistant_id_123")
  548. ```
  549. """
    def __init__(self, http: HttpClient) -> None:
        # Shared HTTP wrapper used by all assistant endpoints.
        self.http = http
  552. async def get(
  553. self,
  554. assistant_id: str,
  555. *,
  556. headers: Mapping[str, str] | None = None,
  557. params: QueryParamTypes | None = None,
  558. ) -> Assistant:
  559. """Get an assistant by ID.
  560. Args:
  561. assistant_id: The ID of the assistant to get.
  562. headers: Optional custom headers to include with the request.
  563. params: Optional query parameters to include with the request.
  564. Returns:
  565. Assistant: Assistant Object.
  566. ???+ example "Example Usage"
  567. ```python
  568. assistant = await client.assistants.get(
  569. assistant_id="my_assistant_id"
  570. )
  571. print(assistant)
  572. ```
  573. ```shell
  574. ----------------------------------------------------
  575. {
  576. 'assistant_id': 'my_assistant_id',
  577. 'graph_id': 'agent',
  578. 'created_at': '2024-06-25T17:10:33.109781+00:00',
  579. 'updated_at': '2024-06-25T17:10:33.109781+00:00',
  580. 'config': {},
  581. 'metadata': {'created_by': 'system'},
  582. 'version': 1,
  583. 'name': 'my_assistant'
  584. }
  585. ```
  586. """
  587. return await self.http.get(
  588. f"/assistants/{assistant_id}", headers=headers, params=params
  589. )
  590. async def get_graph(
  591. self,
  592. assistant_id: str,
  593. *,
  594. xray: int | bool = False,
  595. headers: Mapping[str, str] | None = None,
  596. params: QueryParamTypes | None = None,
  597. ) -> dict[str, list[dict[str, Any]]]:
  598. """Get the graph of an assistant by ID.
  599. Args:
  600. assistant_id: The ID of the assistant to get the graph of.
  601. xray: Include graph representation of subgraphs. If an integer value is provided, only subgraphs with a depth less than or equal to the value will be included.
  602. headers: Optional custom headers to include with the request.
  603. params: Optional query parameters to include with the request.
  604. Returns:
  605. Graph: The graph information for the assistant in JSON format.
  606. ???+ example "Example Usage"
  607. ```python
  608. client = get_client(url="http://localhost:2024")
  609. graph_info = await client.assistants.get_graph(
  610. assistant_id="my_assistant_id"
  611. )
  612. print(graph_info)
  613. ```
  614. ```shell
  615. --------------------------------------------------------------------------------------------------------------------------
  616. {
  617. 'nodes':
  618. [
  619. {'id': '__start__', 'type': 'schema', 'data': '__start__'},
  620. {'id': '__end__', 'type': 'schema', 'data': '__end__'},
  621. {'id': 'agent','type': 'runnable','data': {'id': ['langgraph', 'utils', 'RunnableCallable'],'name': 'agent'}},
  622. ],
  623. 'edges':
  624. [
  625. {'source': '__start__', 'target': 'agent'},
  626. {'source': 'agent','target': '__end__'}
  627. ]
  628. }
  629. ```
  630. """
  631. query_params = {"xray": xray}
  632. if params:
  633. query_params.update(params)
  634. return await self.http.get(
  635. f"/assistants/{assistant_id}/graph", params=query_params, headers=headers
  636. )
  637. async def get_schemas(
  638. self,
  639. assistant_id: str,
  640. *,
  641. headers: Mapping[str, str] | None = None,
  642. params: QueryParamTypes | None = None,
  643. ) -> GraphSchema:
  644. """Get the schemas of an assistant by ID.
  645. Args:
  646. assistant_id: The ID of the assistant to get the schema of.
  647. headers: Optional custom headers to include with the request.
  648. params: Optional query parameters to include with the request.
  649. Returns:
  650. GraphSchema: The graph schema for the assistant.
  651. ???+ example "Example Usage"
  652. ```python
  653. client = get_client(url="http://localhost:2024")
  654. schema = await client.assistants.get_schemas(
  655. assistant_id="my_assistant_id"
  656. )
  657. print(schema)
  658. ```
  659. ```shell
  660. ----------------------------------------------------------------------------------------------------------------------------
  661. {
  662. 'graph_id': 'agent',
  663. 'state_schema':
  664. {
  665. 'title': 'LangGraphInput',
  666. '$ref': '#/definitions/AgentState',
  667. 'definitions':
  668. {
  669. 'BaseMessage':
  670. {
  671. 'title': 'BaseMessage',
  672. 'description': 'Base abstract Message class. Messages are the inputs and outputs of ChatModels.',
  673. 'type': 'object',
  674. 'properties':
  675. {
  676. 'content':
  677. {
  678. 'title': 'Content',
  679. 'anyOf': [
  680. {'type': 'string'},
  681. {'type': 'array','items': {'anyOf': [{'type': 'string'}, {'type': 'object'}]}}
  682. ]
  683. },
  684. 'additional_kwargs':
  685. {
  686. 'title': 'Additional Kwargs',
  687. 'type': 'object'
  688. },
  689. 'response_metadata':
  690. {
  691. 'title': 'Response Metadata',
  692. 'type': 'object'
  693. },
  694. 'type':
  695. {
  696. 'title': 'Type',
  697. 'type': 'string'
  698. },
  699. 'name':
  700. {
  701. 'title': 'Name',
  702. 'type': 'string'
  703. },
  704. 'id':
  705. {
  706. 'title': 'Id',
  707. 'type': 'string'
  708. }
  709. },
  710. 'required': ['content', 'type']
  711. },
  712. 'AgentState':
  713. {
  714. 'title': 'AgentState',
  715. 'type': 'object',
  716. 'properties':
  717. {
  718. 'messages':
  719. {
  720. 'title': 'Messages',
  721. 'type': 'array',
  722. 'items': {'$ref': '#/definitions/BaseMessage'}
  723. }
  724. },
  725. 'required': ['messages']
  726. }
  727. }
  728. },
  729. 'context_schema':
  730. {
  731. 'title': 'Context',
  732. 'type': 'object',
  733. 'properties':
  734. {
  735. 'model_name':
  736. {
  737. 'title': 'Model Name',
  738. 'enum': ['anthropic', 'openai'],
  739. 'type': 'string'
  740. }
  741. }
  742. }
  743. }
  744. ```
  745. """
  746. return await self.http.get(
  747. f"/assistants/{assistant_id}/schemas", headers=headers, params=params
  748. )
  749. async def get_subgraphs(
  750. self,
  751. assistant_id: str,
  752. namespace: str | None = None,
  753. recurse: bool = False,
  754. *,
  755. headers: Mapping[str, str] | None = None,
  756. params: QueryParamTypes | None = None,
  757. ) -> Subgraphs:
  758. """Get the schemas of an assistant by ID.
  759. Args:
  760. assistant_id: The ID of the assistant to get the schema of.
  761. namespace: Optional namespace to filter by.
  762. recurse: Whether to recursively get subgraphs.
  763. headers: Optional custom headers to include with the request.
  764. params: Optional query parameters to include with the request.
  765. Returns:
  766. Subgraphs: The graph schema for the assistant.
  767. """
  768. get_params = {"recurse": recurse}
  769. if params:
  770. get_params = {**get_params, **params}
  771. if namespace is not None:
  772. return await self.http.get(
  773. f"/assistants/{assistant_id}/subgraphs/{namespace}",
  774. params=get_params,
  775. headers=headers,
  776. )
  777. else:
  778. return await self.http.get(
  779. f"/assistants/{assistant_id}/subgraphs",
  780. params=get_params,
  781. headers=headers,
  782. )
  783. async def create(
  784. self,
  785. graph_id: str | None,
  786. config: Config | None = None,
  787. *,
  788. context: Context | None = None,
  789. metadata: Json = None,
  790. assistant_id: str | None = None,
  791. if_exists: OnConflictBehavior | None = None,
  792. name: str | None = None,
  793. headers: Mapping[str, str] | None = None,
  794. description: str | None = None,
  795. params: QueryParamTypes | None = None,
  796. ) -> Assistant:
  797. """Create a new assistant.
  798. Useful when graph is configurable and you want to create different assistants based on different configurations.
  799. Args:
  800. graph_id: The ID of the graph the assistant should use. The graph ID is normally set in your langgraph.json configuration.
  801. config: Configuration to use for the graph.
  802. metadata: Metadata to add to assistant.
  803. context: Static context to add to the assistant.
  804. !!! version-added "Added in version 0.6.0"
  805. assistant_id: Assistant ID to use, will default to a random UUID if not provided.
  806. if_exists: How to handle duplicate creation. Defaults to 'raise' under the hood.
  807. Must be either 'raise' (raise error if duplicate), or 'do_nothing' (return existing assistant).
  808. name: The name of the assistant. Defaults to 'Untitled' under the hood.
  809. headers: Optional custom headers to include with the request.
  810. description: Optional description of the assistant.
  811. The description field is available for langgraph-api server version>=0.0.45
  812. params: Optional query parameters to include with the request.
  813. Returns:
  814. Assistant: The created assistant.
  815. ???+ example "Example Usage"
  816. ```python
  817. client = get_client(url="http://localhost:2024")
  818. assistant = await client.assistants.create(
  819. graph_id="agent",
  820. context={"model_name": "openai"},
  821. metadata={"number":1},
  822. assistant_id="my-assistant-id",
  823. if_exists="do_nothing",
  824. name="my_name"
  825. )
  826. ```
  827. """
  828. payload: dict[str, Any] = {
  829. "graph_id": graph_id,
  830. }
  831. if config:
  832. payload["config"] = config
  833. if context:
  834. payload["context"] = context
  835. if metadata:
  836. payload["metadata"] = metadata
  837. if assistant_id:
  838. payload["assistant_id"] = assistant_id
  839. if if_exists:
  840. payload["if_exists"] = if_exists
  841. if name:
  842. payload["name"] = name
  843. if description:
  844. payload["description"] = description
  845. return await self.http.post(
  846. "/assistants", json=payload, headers=headers, params=params
  847. )
  848. async def update(
  849. self,
  850. assistant_id: str,
  851. *,
  852. graph_id: str | None = None,
  853. config: Config | None = None,
  854. context: Context | None = None,
  855. metadata: Json = None,
  856. name: str | None = None,
  857. headers: Mapping[str, str] | None = None,
  858. description: str | None = None,
  859. params: QueryParamTypes | None = None,
  860. ) -> Assistant:
  861. """Update an assistant.
  862. Use this to point to a different graph, update the configuration, or change the metadata of an assistant.
  863. Args:
  864. assistant_id: Assistant to update.
  865. graph_id: The ID of the graph the assistant should use.
  866. The graph ID is normally set in your langgraph.json configuration. If `None`, assistant will keep pointing to same graph.
  867. config: Configuration to use for the graph.
  868. context: Static context to add to the assistant.
  869. !!! version-added "Added in version 0.6.0"
  870. metadata: Metadata to merge with existing assistant metadata.
  871. name: The new name for the assistant.
  872. headers: Optional custom headers to include with the request.
  873. description: Optional description of the assistant.
  874. The description field is available for langgraph-api server version>=0.0.45
  875. params: Optional query parameters to include with the request.
  876. Returns:
  877. The updated assistant.
  878. ???+ example "Example Usage"
  879. ```python
  880. client = get_client(url="http://localhost:2024")
  881. assistant = await client.assistants.update(
  882. assistant_id='e280dad7-8618-443f-87f1-8e41841c180f',
  883. graph_id="other-graph",
  884. context={"model_name": "anthropic"},
  885. metadata={"number":2}
  886. )
  887. ```
  888. """
  889. payload: dict[str, Any] = {}
  890. if graph_id:
  891. payload["graph_id"] = graph_id
  892. if config:
  893. payload["config"] = config
  894. if context:
  895. payload["context"] = context
  896. if metadata:
  897. payload["metadata"] = metadata
  898. if name:
  899. payload["name"] = name
  900. if description:
  901. payload["description"] = description
  902. return await self.http.patch(
  903. f"/assistants/{assistant_id}",
  904. json=payload,
  905. headers=headers,
  906. params=params,
  907. )
  908. async def delete(
  909. self,
  910. assistant_id: str,
  911. *,
  912. headers: Mapping[str, str] | None = None,
  913. params: QueryParamTypes | None = None,
  914. ) -> None:
  915. """Delete an assistant.
  916. Args:
  917. assistant_id: The assistant ID to delete.
  918. headers: Optional custom headers to include with the request.
  919. params: Optional query parameters to include with the request.
  920. Returns:
  921. `None`
  922. ???+ example "Example Usage"
  923. ```python
  924. client = get_client(url="http://localhost:2024")
  925. await client.assistants.delete(
  926. assistant_id="my_assistant_id"
  927. )
  928. ```
  929. """
  930. await self.http.delete(
  931. f"/assistants/{assistant_id}", headers=headers, params=params
  932. )
  933. @overload
  934. async def search(
  935. self,
  936. *,
  937. metadata: Json = None,
  938. graph_id: str | None = None,
  939. name: str | None = None,
  940. limit: int = 10,
  941. offset: int = 0,
  942. sort_by: AssistantSortBy | None = None,
  943. sort_order: SortOrder | None = None,
  944. select: list[AssistantSelectField] | None = None,
  945. response_format: Literal["object"],
  946. headers: Mapping[str, str] | None = None,
  947. params: QueryParamTypes | None = None,
  948. ) -> AssistantsSearchResponse: ...
  949. @overload
  950. async def search(
  951. self,
  952. *,
  953. metadata: Json = None,
  954. graph_id: str | None = None,
  955. name: str | None = None,
  956. limit: int = 10,
  957. offset: int = 0,
  958. sort_by: AssistantSortBy | None = None,
  959. sort_order: SortOrder | None = None,
  960. select: list[AssistantSelectField] | None = None,
  961. response_format: Literal["array"] = "array",
  962. headers: Mapping[str, str] | None = None,
  963. params: QueryParamTypes | None = None,
  964. ) -> list[Assistant]: ...
  965. async def search(
  966. self,
  967. *,
  968. metadata: Json = None,
  969. graph_id: str | None = None,
  970. name: str | None = None,
  971. limit: int = 10,
  972. offset: int = 0,
  973. sort_by: AssistantSortBy | None = None,
  974. sort_order: SortOrder | None = None,
  975. select: list[AssistantSelectField] | None = None,
  976. response_format: Literal["array", "object"] = "array",
  977. headers: Mapping[str, str] | None = None,
  978. params: QueryParamTypes | None = None,
  979. ) -> AssistantsSearchResponse | list[Assistant]:
  980. """Search for assistants.
  981. Args:
  982. metadata: Metadata to filter by. Exact match filter for each KV pair.
  983. graph_id: The ID of the graph to filter by.
  984. The graph ID is normally set in your langgraph.json configuration.
  985. name: The name of the assistant to filter by.
  986. The filtering logic will match assistants where 'name' is a substring (case insensitive) of the assistant name.
  987. limit: The maximum number of results to return.
  988. offset: The number of results to skip.
  989. sort_by: The field to sort by.
  990. sort_order: The order to sort by.
  991. select: Specific assistant fields to include in the response.
  992. response_format: Controls the response shape. Use ``"array"`` (default)
  993. to return a bare list of assistants, or ``"object"`` to return
  994. a mapping containing assistants plus pagination metadata.
  995. Defaults to "array", though this default will be changed to "object" in a future release.
  996. headers: Optional custom headers to include with the request.
  997. params: Optional query parameters to include with the request.
  998. Returns:
  999. A list of assistants (when ``response_format=\"array\"``) or a mapping
  1000. with the assistants and the next pagination cursor (when
  1001. ``response_format=\"object\"``).
  1002. ???+ example "Example Usage"
  1003. ```python
  1004. client = get_client(url="http://localhost:2024")
  1005. response = await client.assistants.search(
  1006. metadata = {"name":"my_name"},
  1007. graph_id="my_graph_id",
  1008. limit=5,
  1009. offset=5,
  1010. response_format="object"
  1011. )
  1012. next_cursor = response["next"]
  1013. assistants = response["assistants"]
  1014. ```
  1015. """
  1016. if response_format not in ("array", "object"):
  1017. raise ValueError(
  1018. f"response_format must be 'array' or 'object', got {response_format!r}"
  1019. )
  1020. payload: dict[str, Any] = {
  1021. "limit": limit,
  1022. "offset": offset,
  1023. }
  1024. if metadata:
  1025. payload["metadata"] = metadata
  1026. if graph_id:
  1027. payload["graph_id"] = graph_id
  1028. if name:
  1029. payload["name"] = name
  1030. if sort_by:
  1031. payload["sort_by"] = sort_by
  1032. if sort_order:
  1033. payload["sort_order"] = sort_order
  1034. if select:
  1035. payload["select"] = select
  1036. next_cursor: str | None = None
  1037. def capture_pagination(response: httpx.Response) -> None:
  1038. nonlocal next_cursor
  1039. next_cursor = response.headers.get("X-Pagination-Next")
  1040. assistants = cast(
  1041. list[Assistant],
  1042. await self.http.post(
  1043. "/assistants/search",
  1044. json=payload,
  1045. headers=headers,
  1046. params=params,
  1047. on_response=capture_pagination if response_format == "object" else None,
  1048. ),
  1049. )
  1050. if response_format == "object":
  1051. return {"assistants": assistants, "next": next_cursor}
  1052. return assistants
  1053. async def count(
  1054. self,
  1055. *,
  1056. metadata: Json = None,
  1057. graph_id: str | None = None,
  1058. name: str | None = None,
  1059. headers: Mapping[str, str] | None = None,
  1060. params: QueryParamTypes | None = None,
  1061. ) -> int:
  1062. """Count assistants matching filters.
  1063. Args:
  1064. metadata: Metadata to filter by. Exact match for each key/value.
  1065. graph_id: Optional graph id to filter by.
  1066. name: Optional name to filter by.
  1067. The filtering logic will match assistants where 'name' is a substring (case insensitive) of the assistant name.
  1068. headers: Optional custom headers to include with the request.
  1069. params: Optional query parameters to include with the request.
  1070. Returns:
  1071. int: Number of assistants matching the criteria.
  1072. """
  1073. payload: dict[str, Any] = {}
  1074. if metadata:
  1075. payload["metadata"] = metadata
  1076. if graph_id:
  1077. payload["graph_id"] = graph_id
  1078. if name:
  1079. payload["name"] = name
  1080. return await self.http.post(
  1081. "/assistants/count", json=payload, headers=headers, params=params
  1082. )
  1083. async def get_versions(
  1084. self,
  1085. assistant_id: str,
  1086. metadata: Json = None,
  1087. limit: int = 10,
  1088. offset: int = 0,
  1089. *,
  1090. headers: Mapping[str, str] | None = None,
  1091. params: QueryParamTypes | None = None,
  1092. ) -> list[AssistantVersion]:
  1093. """List all versions of an assistant.
  1094. Args:
  1095. assistant_id: The assistant ID to get versions for.
  1096. metadata: Metadata to filter versions by. Exact match filter for each KV pair.
  1097. limit: The maximum number of versions to return.
  1098. offset: The number of versions to skip.
  1099. headers: Optional custom headers to include with the request.
  1100. params: Optional query parameters to include with the request.
  1101. Returns:
  1102. A list of assistant versions.
  1103. ???+ example "Example Usage"
  1104. ```python
  1105. client = get_client(url="http://localhost:2024")
  1106. assistant_versions = await client.assistants.get_versions(
  1107. assistant_id="my_assistant_id"
  1108. )
  1109. ```
  1110. """
  1111. payload: dict[str, Any] = {
  1112. "limit": limit,
  1113. "offset": offset,
  1114. }
  1115. if metadata:
  1116. payload["metadata"] = metadata
  1117. return await self.http.post(
  1118. f"/assistants/{assistant_id}/versions",
  1119. json=payload,
  1120. headers=headers,
  1121. params=params,
  1122. )
  1123. async def set_latest(
  1124. self,
  1125. assistant_id: str,
  1126. version: int,
  1127. *,
  1128. headers: Mapping[str, str] | None = None,
  1129. params: QueryParamTypes | None = None,
  1130. ) -> Assistant:
  1131. """Change the version of an assistant.
  1132. Args:
  1133. assistant_id: The assistant ID to delete.
  1134. version: The version to change to.
  1135. headers: Optional custom headers to include with the request.
  1136. params: Optional query parameters to include with the request.
  1137. Returns:
  1138. Assistant Object.
  1139. ???+ example "Example Usage"
  1140. ```python
  1141. client = get_client(url="http://localhost:2024")
  1142. new_version_assistant = await client.assistants.set_latest(
  1143. assistant_id="my_assistant_id",
  1144. version=3
  1145. )
  1146. ```
  1147. """
  1148. payload: dict[str, Any] = {"version": version}
  1149. return await self.http.post(
  1150. f"/assistants/{assistant_id}/latest",
  1151. json=payload,
  1152. headers=headers,
  1153. params=params,
  1154. )
class ThreadsClient:
    """Client for managing threads in LangGraph.

    A thread maintains the state of a graph across multiple interactions/invocations (aka runs).
    It accumulates and persists the graph's state, allowing for continuity between separate
    invocations of the graph.

    ???+ example "Example"

        ```python
        client = get_client(url="http://localhost:2024")
        new_thread = await client.threads.create(metadata={"user_id": "123"})
        ```
    """

    def __init__(self, http: HttpClient) -> None:
        # Shared HTTP transport used by all thread operations.
        self.http = http
  1168. async def get(
  1169. self,
  1170. thread_id: str,
  1171. *,
  1172. headers: Mapping[str, str] | None = None,
  1173. params: QueryParamTypes | None = None,
  1174. ) -> Thread:
  1175. """Get a thread by ID.
  1176. Args:
  1177. thread_id: The ID of the thread to get.
  1178. headers: Optional custom headers to include with the request.
  1179. params: Optional query parameters to include with the request.
  1180. Returns:
  1181. Thread object.
  1182. ???+ example "Example Usage"
  1183. ```python
  1184. client = get_client(url="http://localhost:2024")
  1185. thread = await client.threads.get(
  1186. thread_id="my_thread_id"
  1187. )
  1188. print(thread)
  1189. ```
  1190. ```shell
  1191. -----------------------------------------------------
  1192. {
  1193. 'thread_id': 'my_thread_id',
  1194. 'created_at': '2024-07-18T18:35:15.540834+00:00',
  1195. 'updated_at': '2024-07-18T18:35:15.540834+00:00',
  1196. 'metadata': {'graph_id': 'agent'}
  1197. }
  1198. ```
  1199. """
  1200. return await self.http.get(
  1201. f"/threads/{thread_id}", headers=headers, params=params
  1202. )
  1203. async def create(
  1204. self,
  1205. *,
  1206. metadata: Json = None,
  1207. thread_id: str | None = None,
  1208. if_exists: OnConflictBehavior | None = None,
  1209. supersteps: Sequence[dict[str, Sequence[dict[str, Any]]]] | None = None,
  1210. graph_id: str | None = None,
  1211. ttl: int | Mapping[str, Any] | None = None,
  1212. headers: Mapping[str, str] | None = None,
  1213. params: QueryParamTypes | None = None,
  1214. ) -> Thread:
  1215. """Create a new thread.
  1216. Args:
  1217. metadata: Metadata to add to thread.
  1218. thread_id: ID of thread.
  1219. If `None`, ID will be a randomly generated UUID.
  1220. if_exists: How to handle duplicate creation. Defaults to 'raise' under the hood.
  1221. Must be either 'raise' (raise error if duplicate), or 'do_nothing' (return existing thread).
  1222. supersteps: Apply a list of supersteps when creating a thread, each containing a sequence of updates.
  1223. Each update has `values` or `command` and `as_node`. Used for copying a thread between deployments.
  1224. graph_id: Optional graph ID to associate with the thread.
  1225. ttl: Optional time-to-live in minutes for the thread. You can pass an
  1226. integer (minutes) or a mapping with keys `ttl` and optional
  1227. `strategy` (defaults to "delete").
  1228. headers: Optional custom headers to include with the request.
  1229. params: Optional query parameters to include with the request.
  1230. Returns:
  1231. The created thread.
  1232. ???+ example "Example Usage"
  1233. ```python
  1234. client = get_client(url="http://localhost:2024")
  1235. thread = await client.threads.create(
  1236. metadata={"number":1},
  1237. thread_id="my-thread-id",
  1238. if_exists="raise"
  1239. )
  1240. ```
  1241. """
  1242. payload: dict[str, Any] = {}
  1243. if thread_id:
  1244. payload["thread_id"] = thread_id
  1245. if metadata or graph_id:
  1246. payload["metadata"] = {
  1247. **(metadata or {}),
  1248. **({"graph_id": graph_id} if graph_id else {}),
  1249. }
  1250. if if_exists:
  1251. payload["if_exists"] = if_exists
  1252. if supersteps:
  1253. payload["supersteps"] = [
  1254. {
  1255. "updates": [
  1256. {
  1257. "values": u["values"],
  1258. "command": u.get("command"),
  1259. "as_node": u["as_node"],
  1260. }
  1261. for u in s["updates"]
  1262. ]
  1263. }
  1264. for s in supersteps
  1265. ]
  1266. if ttl is not None:
  1267. if isinstance(ttl, (int, float)):
  1268. payload["ttl"] = {"ttl": ttl, "strategy": "delete"}
  1269. else:
  1270. payload["ttl"] = ttl
  1271. return await self.http.post(
  1272. "/threads", json=payload, headers=headers, params=params
  1273. )
  1274. async def update(
  1275. self,
  1276. thread_id: str,
  1277. *,
  1278. metadata: Mapping[str, Any],
  1279. ttl: int | Mapping[str, Any] | None = None,
  1280. headers: Mapping[str, str] | None = None,
  1281. params: QueryParamTypes | None = None,
  1282. ) -> Thread:
  1283. """Update a thread.
  1284. Args:
  1285. thread_id: ID of thread to update.
  1286. metadata: Metadata to merge with existing thread metadata.
  1287. ttl: Optional time-to-live in minutes for the thread. You can pass an
  1288. integer (minutes) or a mapping with keys `ttl` and optional
  1289. `strategy` (defaults to "delete").
  1290. headers: Optional custom headers to include with the request.
  1291. params: Optional query parameters to include with the request.
  1292. Returns:
  1293. The created thread.
  1294. ???+ example "Example Usage"
  1295. ```python
  1296. client = get_client(url="http://localhost:2024")
  1297. thread = await client.threads.update(
  1298. thread_id="my-thread-id",
  1299. metadata={"number":1},
  1300. ttl=43_200,
  1301. )
  1302. ```
  1303. """
  1304. payload: dict[str, Any] = {"metadata": metadata}
  1305. if ttl is not None:
  1306. if isinstance(ttl, (int, float)):
  1307. payload["ttl"] = {"ttl": ttl, "strategy": "delete"}
  1308. else:
  1309. payload["ttl"] = ttl
  1310. return await self.http.patch(
  1311. f"/threads/{thread_id}",
  1312. json=payload,
  1313. headers=headers,
  1314. params=params,
  1315. )
  1316. async def delete(
  1317. self,
  1318. thread_id: str,
  1319. *,
  1320. headers: Mapping[str, str] | None = None,
  1321. params: QueryParamTypes | None = None,
  1322. ) -> None:
  1323. """Delete a thread.
  1324. Args:
  1325. thread_id: The ID of the thread to delete.
  1326. headers: Optional custom headers to include with the request.
  1327. params: Optional query parameters to include with the request.
  1328. Returns:
  1329. `None`
  1330. ???+ example "Example Usage"
  1331. ```python
  1332. client = get_client(url="http://localhost2024)
  1333. await client.threads.delete(
  1334. thread_id="my_thread_id"
  1335. )
  1336. ```
  1337. """
  1338. await self.http.delete(f"/threads/{thread_id}", headers=headers, params=params)
  1339. async def search(
  1340. self,
  1341. *,
  1342. metadata: Json = None,
  1343. values: Json = None,
  1344. ids: Sequence[str] | None = None,
  1345. status: ThreadStatus | None = None,
  1346. limit: int = 10,
  1347. offset: int = 0,
  1348. sort_by: ThreadSortBy | None = None,
  1349. sort_order: SortOrder | None = None,
  1350. select: list[ThreadSelectField] | None = None,
  1351. headers: Mapping[str, str] | None = None,
  1352. params: QueryParamTypes | None = None,
  1353. ) -> list[Thread]:
  1354. """Search for threads.
  1355. Args:
  1356. metadata: Thread metadata to filter on.
  1357. values: State values to filter on.
  1358. ids: List of thread IDs to filter by.
  1359. status: Thread status to filter on.
  1360. Must be one of 'idle', 'busy', 'interrupted' or 'error'.
  1361. limit: Limit on number of threads to return.
  1362. offset: Offset in threads table to start search from.
  1363. sort_by: Sort by field.
  1364. sort_order: Sort order.
  1365. headers: Optional custom headers to include with the request.
  1366. params: Optional query parameters to include with the request.
  1367. Returns:
  1368. List of the threads matching the search parameters.
  1369. ???+ example "Example Usage"
  1370. ```python
  1371. client = get_client(url="http://localhost:2024")
  1372. threads = await client.threads.search(
  1373. metadata={"number":1},
  1374. status="interrupted",
  1375. limit=15,
  1376. offset=5
  1377. )
  1378. ```
  1379. """
  1380. payload: dict[str, Any] = {
  1381. "limit": limit,
  1382. "offset": offset,
  1383. }
  1384. if metadata:
  1385. payload["metadata"] = metadata
  1386. if values:
  1387. payload["values"] = values
  1388. if ids:
  1389. payload["ids"] = ids
  1390. if status:
  1391. payload["status"] = status
  1392. if sort_by:
  1393. payload["sort_by"] = sort_by
  1394. if sort_order:
  1395. payload["sort_order"] = sort_order
  1396. if select:
  1397. payload["select"] = select
  1398. return await self.http.post(
  1399. "/threads/search",
  1400. json=payload,
  1401. headers=headers,
  1402. params=params,
  1403. )
  1404. async def count(
  1405. self,
  1406. *,
  1407. metadata: Json = None,
  1408. values: Json = None,
  1409. status: ThreadStatus | None = None,
  1410. headers: Mapping[str, str] | None = None,
  1411. params: QueryParamTypes | None = None,
  1412. ) -> int:
  1413. """Count threads matching filters.
  1414. Args:
  1415. metadata: Thread metadata to filter on.
  1416. values: State values to filter on.
  1417. status: Thread status to filter on.
  1418. headers: Optional custom headers to include with the request.
  1419. params: Optional query parameters to include with the request.
  1420. Returns:
  1421. int: Number of threads matching the criteria.
  1422. """
  1423. payload: dict[str, Any] = {}
  1424. if metadata:
  1425. payload["metadata"] = metadata
  1426. if values:
  1427. payload["values"] = values
  1428. if status:
  1429. payload["status"] = status
  1430. return await self.http.post(
  1431. "/threads/count", json=payload, headers=headers, params=params
  1432. )
  1433. async def copy(
  1434. self,
  1435. thread_id: str,
  1436. *,
  1437. headers: Mapping[str, str] | None = None,
  1438. params: QueryParamTypes | None = None,
  1439. ) -> None:
  1440. """Copy a thread.
  1441. Args:
  1442. thread_id: The ID of the thread to copy.
  1443. headers: Optional custom headers to include with the request.
  1444. params: Optional query parameters to include with the request.
  1445. Returns:
  1446. `None`
  1447. ???+ example "Example Usage"
  1448. ```python
  1449. client = get_client(url="http://localhost:2024)
  1450. await client.threads.copy(
  1451. thread_id="my_thread_id"
  1452. )
  1453. ```
  1454. """
  1455. return await self.http.post(
  1456. f"/threads/{thread_id}/copy", json=None, headers=headers, params=params
  1457. )
  1458. async def get_state(
  1459. self,
  1460. thread_id: str,
  1461. checkpoint: Checkpoint | None = None,
  1462. checkpoint_id: str | None = None, # deprecated
  1463. *,
  1464. subgraphs: bool = False,
  1465. headers: Mapping[str, str] | None = None,
  1466. params: QueryParamTypes | None = None,
  1467. ) -> ThreadState:
  1468. """Get the state of a thread.
  1469. Args:
  1470. thread_id: The ID of the thread to get the state of.
  1471. checkpoint: The checkpoint to get the state of.
  1472. checkpoint_id: (deprecated) The checkpoint ID to get the state of.
  1473. subgraphs: Include subgraphs states.
  1474. headers: Optional custom headers to include with the request.
  1475. params: Optional query parameters to include with the request.
  1476. Returns:
  1477. The thread of the state.
  1478. ???+ example "Example Usage"
  1479. ```python
  1480. client = get_client(url="http://localhost:2024)
  1481. thread_state = await client.threads.get_state(
  1482. thread_id="my_thread_id",
  1483. checkpoint_id="my_checkpoint_id"
  1484. )
  1485. print(thread_state)
  1486. ```
  1487. ```shell
  1488. ----------------------------------------------------------------------------------------------------------------------------------------------------------------------
  1489. {
  1490. 'values': {
  1491. 'messages': [
  1492. {
  1493. 'content': 'how are you?',
  1494. 'additional_kwargs': {},
  1495. 'response_metadata': {},
  1496. 'type': 'human',
  1497. 'name': None,
  1498. 'id': 'fe0a5778-cfe9-42ee-b807-0adaa1873c10',
  1499. 'example': False
  1500. },
  1501. {
  1502. 'content': "I'm doing well, thanks for asking! I'm an AI assistant created by Anthropic to be helpful, honest, and harmless.",
  1503. 'additional_kwargs': {},
  1504. 'response_metadata': {},
  1505. 'type': 'ai',
  1506. 'name': None,
  1507. 'id': 'run-159b782c-b679-4830-83c6-cef87798fe8b',
  1508. 'example': False,
  1509. 'tool_calls': [],
  1510. 'invalid_tool_calls': [],
  1511. 'usage_metadata': None
  1512. }
  1513. ]
  1514. },
  1515. 'next': [],
  1516. 'checkpoint':
  1517. {
  1518. 'thread_id': 'e2496803-ecd5-4e0c-a779-3226296181c2',
  1519. 'checkpoint_ns': '',
  1520. 'checkpoint_id': '1ef4a9b8-e6fb-67b1-8001-abd5184439d1'
  1521. }
  1522. 'metadata':
  1523. {
  1524. 'step': 1,
  1525. 'run_id': '1ef4a9b8-d7da-679a-a45a-872054341df2',
  1526. 'source': 'loop',
  1527. 'writes':
  1528. {
  1529. 'agent':
  1530. {
  1531. 'messages': [
  1532. {
  1533. 'id': 'run-159b782c-b679-4830-83c6-cef87798fe8b',
  1534. 'name': None,
  1535. 'type': 'ai',
  1536. 'content': "I'm doing well, thanks for asking! I'm an AI assistant created by Anthropic to be helpful, honest, and harmless.",
  1537. 'example': False,
  1538. 'tool_calls': [],
  1539. 'usage_metadata': None,
  1540. 'additional_kwargs': {},
  1541. 'response_metadata': {},
  1542. 'invalid_tool_calls': []
  1543. }
  1544. ]
  1545. }
  1546. },
  1547. 'user_id': None,
  1548. 'graph_id': 'agent',
  1549. 'thread_id': 'e2496803-ecd5-4e0c-a779-3226296181c2',
  1550. 'created_by': 'system',
  1551. 'assistant_id': 'fe096781-5601-53d2-b2f6-0d3403f7e9ca'},
  1552. 'created_at': '2024-07-25T15:35:44.184703+00:00',
  1553. 'parent_config':
  1554. {
  1555. 'thread_id': 'e2496803-ecd5-4e0c-a779-3226296181c2',
  1556. 'checkpoint_ns': '',
  1557. 'checkpoint_id': '1ef4a9b8-d80d-6fa7-8000-9300467fad0f'
  1558. }
  1559. }
  1560. ```
  1561. """
  1562. if checkpoint:
  1563. return await self.http.post(
  1564. f"/threads/{thread_id}/state/checkpoint",
  1565. json={"checkpoint": checkpoint, "subgraphs": subgraphs},
  1566. headers=headers,
  1567. params=params,
  1568. )
  1569. elif checkpoint_id:
  1570. get_params = {"subgraphs": subgraphs}
  1571. if params:
  1572. get_params = {**get_params, **params}
  1573. return await self.http.get(
  1574. f"/threads/{thread_id}/state/{checkpoint_id}",
  1575. params=get_params,
  1576. headers=headers,
  1577. )
  1578. else:
  1579. get_params = {"subgraphs": subgraphs}
  1580. if params:
  1581. get_params = {**get_params, **params}
  1582. return await self.http.get(
  1583. f"/threads/{thread_id}/state",
  1584. params=get_params,
  1585. headers=headers,
  1586. )
  1587. async def update_state(
  1588. self,
  1589. thread_id: str,
  1590. values: dict[str, Any] | Sequence[dict] | None,
  1591. *,
  1592. as_node: str | None = None,
  1593. checkpoint: Checkpoint | None = None,
  1594. checkpoint_id: str | None = None, # deprecated
  1595. headers: Mapping[str, str] | None = None,
  1596. params: QueryParamTypes | None = None,
  1597. ) -> ThreadUpdateStateResponse:
  1598. """Update the state of a thread.
  1599. Args:
  1600. thread_id: The ID of the thread to update.
  1601. values: The values to update the state with.
  1602. as_node: Update the state as if this node had just executed.
  1603. checkpoint: The checkpoint to update the state of.
  1604. checkpoint_id: (deprecated) The checkpoint ID to update the state of.
  1605. headers: Optional custom headers to include with the request.
  1606. params: Optional query parameters to include with the request.
  1607. Returns:
  1608. Response after updating a thread's state.
  1609. ???+ example "Example Usage"
  1610. ```python
  1611. client = get_client(url="http://localhost:2024)
  1612. response = await client.threads.update_state(
  1613. thread_id="my_thread_id",
  1614. values={"messages":[{"role": "user", "content": "hello!"}]},
  1615. as_node="my_node",
  1616. )
  1617. print(response)
  1618. ```
  1619. ```shell
  1620. ----------------------------------------------------------------------------------------------------------------------------------------------------------------------
  1621. {
  1622. 'checkpoint': {
  1623. 'thread_id': 'e2496803-ecd5-4e0c-a779-3226296181c2',
  1624. 'checkpoint_ns': '',
  1625. 'checkpoint_id': '1ef4a9b8-e6fb-67b1-8001-abd5184439d1',
  1626. 'checkpoint_map': {}
  1627. }
  1628. }
  1629. ```
  1630. """
  1631. payload: dict[str, Any] = {
  1632. "values": values,
  1633. }
  1634. if checkpoint_id:
  1635. payload["checkpoint_id"] = checkpoint_id
  1636. if checkpoint:
  1637. payload["checkpoint"] = checkpoint
  1638. if as_node:
  1639. payload["as_node"] = as_node
  1640. return await self.http.post(
  1641. f"/threads/{thread_id}/state", json=payload, headers=headers, params=params
  1642. )
  1643. async def get_history(
  1644. self,
  1645. thread_id: str,
  1646. *,
  1647. limit: int = 10,
  1648. before: str | Checkpoint | None = None,
  1649. metadata: Mapping[str, Any] | None = None,
  1650. checkpoint: Checkpoint | None = None,
  1651. headers: Mapping[str, str] | None = None,
  1652. params: QueryParamTypes | None = None,
  1653. ) -> list[ThreadState]:
  1654. """Get the state history of a thread.
  1655. Args:
  1656. thread_id: The ID of the thread to get the state history for.
  1657. checkpoint: Return states for this subgraph. If empty defaults to root.
  1658. limit: The maximum number of states to return.
  1659. before: Return states before this checkpoint.
  1660. metadata: Filter states by metadata key-value pairs.
  1661. headers: Optional custom headers to include with the request.
  1662. params: Optional query parameters to include with the request.
  1663. Returns:
  1664. The state history of the thread.
  1665. ???+ example "Example Usage"
  1666. ```python
  1667. client = get_client(url="http://localhost:2024)
  1668. thread_state = await client.threads.get_history(
  1669. thread_id="my_thread_id",
  1670. limit=5,
  1671. )
  1672. ```
  1673. """
  1674. payload: dict[str, Any] = {
  1675. "limit": limit,
  1676. }
  1677. if before:
  1678. payload["before"] = before
  1679. if metadata:
  1680. payload["metadata"] = metadata
  1681. if checkpoint:
  1682. payload["checkpoint"] = checkpoint
  1683. return await self.http.post(
  1684. f"/threads/{thread_id}/history",
  1685. json=payload,
  1686. headers=headers,
  1687. params=params,
  1688. )
  1689. async def join_stream(
  1690. self,
  1691. thread_id: str,
  1692. *,
  1693. last_event_id: str | None = None,
  1694. stream_mode: ThreadStreamMode | Sequence[ThreadStreamMode] = "run_modes",
  1695. headers: Mapping[str, str] | None = None,
  1696. params: QueryParamTypes | None = None,
  1697. ) -> AsyncIterator[StreamPart]:
  1698. """Get a stream of events for a thread.
  1699. Args:
  1700. thread_id: The ID of the thread to get the stream for.
  1701. last_event_id: The ID of the last event to get.
  1702. headers: Optional custom headers to include with the request.
  1703. params: Optional query parameters to include with the request.
  1704. Returns:
  1705. An iterator of stream parts.
  1706. ???+ example "Example Usage"
  1707. ```python
  1708. for chunk in client.threads.join_stream(
  1709. thread_id="my_thread_id",
  1710. last_event_id="my_event_id",
  1711. ):
  1712. print(chunk)
  1713. ```
  1714. """
  1715. query_params = {
  1716. "stream_mode": stream_mode,
  1717. }
  1718. if params:
  1719. query_params.update(params)
  1720. return self.http.stream(
  1721. f"/threads/{thread_id}/stream",
  1722. "GET",
  1723. headers={
  1724. **({"Last-Event-ID": last_event_id} if last_event_id else {}),
  1725. **(headers or {}),
  1726. },
  1727. params=query_params,
  1728. )
class RunsClient:
    """Client for managing runs in LangGraph.

    A run is a single assistant invocation with optional input, config, context, and metadata.
    This client manages runs, which can be stateful (on threads) or stateless.

    ???+ example "Example"

        ```python
        client = get_client(url="http://localhost:2024")
        run = await client.runs.create(assistant_id="asst_123", thread_id="thread_456", input={"query": "Hello"})
        ```
    """

    def __init__(self, http: HttpClient) -> None:
        # Shared HTTP transport used by all run operations.
        self.http = http
# Overload: stateful run — `thread_id` is a concrete thread ID.
@overload
def stream(
    self,
    thread_id: str,
    assistant_id: str,
    *,
    input: Input | None = None,
    command: Command | None = None,
    stream_mode: StreamMode | Sequence[StreamMode] = "values",
    stream_subgraphs: bool = False,
    stream_resumable: bool = False,
    metadata: Mapping[str, Any] | None = None,
    config: Config | None = None,
    context: Context | None = None,
    checkpoint: Checkpoint | None = None,
    checkpoint_id: str | None = None,
    checkpoint_during: bool | None = None,
    interrupt_before: All | Sequence[str] | None = None,
    interrupt_after: All | Sequence[str] | None = None,
    feedback_keys: Sequence[str] | None = None,
    on_disconnect: DisconnectMode | None = None,
    webhook: str | None = None,
    multitask_strategy: MultitaskStrategy | None = None,
    if_not_exists: IfNotExists | None = None,
    after_seconds: int | None = None,
    headers: Mapping[str, str] | None = None,
    params: QueryParamTypes | None = None,
    on_run_created: Callable[[RunCreateMetadata], None] | None = None,
) -> AsyncIterator[StreamPart]: ...
# Overload: stateless run — `thread_id` is None; `on_completion` controls
# whether the implicitly created thread is kept or deleted.
@overload
def stream(
    self,
    thread_id: None,
    assistant_id: str,
    *,
    input: Input | None = None,
    command: Command | None = None,
    stream_mode: StreamMode | Sequence[StreamMode] = "values",
    stream_subgraphs: bool = False,
    stream_resumable: bool = False,
    metadata: Mapping[str, Any] | None = None,
    config: Config | None = None,
    checkpoint_during: bool | None = None,
    interrupt_before: All | Sequence[str] | None = None,
    interrupt_after: All | Sequence[str] | None = None,
    feedback_keys: Sequence[str] | None = None,
    on_disconnect: DisconnectMode | None = None,
    on_completion: OnCompletionBehavior | None = None,
    if_not_exists: IfNotExists | None = None,
    webhook: str | None = None,
    after_seconds: int | None = None,
    headers: Mapping[str, str] | None = None,
    params: QueryParamTypes | None = None,
    on_run_created: Callable[[RunCreateMetadata], None] | None = None,
) -> AsyncIterator[StreamPart]: ...
  1796. def stream(
  1797. self,
  1798. thread_id: str | None,
  1799. assistant_id: str,
  1800. *,
  1801. input: Input | None = None,
  1802. command: Command | None = None,
  1803. stream_mode: StreamMode | Sequence[StreamMode] = "values",
  1804. stream_subgraphs: bool = False,
  1805. stream_resumable: bool = False,
  1806. metadata: Mapping[str, Any] | None = None,
  1807. config: Config | None = None,
  1808. context: Context | None = None,
  1809. checkpoint: Checkpoint | None = None,
  1810. checkpoint_id: str | None = None,
  1811. checkpoint_during: bool | None = None, # deprecated
  1812. interrupt_before: All | Sequence[str] | None = None,
  1813. interrupt_after: All | Sequence[str] | None = None,
  1814. feedback_keys: Sequence[str] | None = None,
  1815. on_disconnect: DisconnectMode | None = None,
  1816. on_completion: OnCompletionBehavior | None = None,
  1817. webhook: str | None = None,
  1818. multitask_strategy: MultitaskStrategy | None = None,
  1819. if_not_exists: IfNotExists | None = None,
  1820. after_seconds: int | None = None,
  1821. headers: Mapping[str, str] | None = None,
  1822. params: QueryParamTypes | None = None,
  1823. on_run_created: Callable[[RunCreateMetadata], None] | None = None,
  1824. durability: Durability | None = None,
  1825. ) -> AsyncIterator[StreamPart]:
  1826. """Create a run and stream the results.
  1827. Args:
  1828. thread_id: the thread ID to assign to the thread.
  1829. If `None` will create a stateless run.
  1830. assistant_id: The assistant ID or graph name to stream from.
  1831. If using graph name, will default to first assistant created from that graph.
  1832. input: The input to the graph.
  1833. command: A command to execute. Cannot be combined with input.
  1834. stream_mode: The stream mode(s) to use.
  1835. stream_subgraphs: Whether to stream output from subgraphs.
  1836. stream_resumable: Whether the stream is considered resumable.
  1837. If true, the stream can be resumed and replayed in its entirety even after disconnection.
  1838. metadata: Metadata to assign to the run.
  1839. config: The configuration for the assistant.
  1840. context: Static context to add to the assistant.
  1841. !!! version-added "Added in version 0.6.0"
  1842. checkpoint: The checkpoint to resume from.
  1843. checkpoint_during: (deprecated) Whether to checkpoint during the run (or only at the end/interruption).
  1844. interrupt_before: Nodes to interrupt immediately before they get executed.
  1845. interrupt_after: Nodes to Nodes to interrupt immediately after they get executed.
  1846. feedback_keys: Feedback keys to assign to run.
  1847. on_disconnect: The disconnect mode to use.
  1848. Must be one of 'cancel' or 'continue'.
  1849. on_completion: Whether to delete or keep the thread created for a stateless run.
  1850. Must be one of 'delete' or 'keep'.
  1851. webhook: Webhook to call after LangGraph API call is done.
  1852. multitask_strategy: Multitask strategy to use.
  1853. Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
  1854. if_not_exists: How to handle missing thread. Defaults to 'reject'.
  1855. Must be either 'reject' (raise error if missing), or 'create' (create new thread).
  1856. after_seconds: The number of seconds to wait before starting the run.
  1857. Use to schedule future runs.
  1858. headers: Optional custom headers to include with the request.
  1859. params: Optional query parameters to include with the request.
  1860. on_run_created: Callback when a run is created.
  1861. durability: The durability to use for the run. Values are "sync", "async", or "exit".
  1862. "async" means checkpoints are persisted async while next graph step executes, replaces checkpoint_during=True
  1863. "sync" means checkpoints are persisted sync after graph step executes, replaces checkpoint_during=False
  1864. "exit" means checkpoints are only persisted when the run exits, does not save intermediate steps
  1865. Returns:
  1866. Asynchronous iterator of stream results.
  1867. ???+ example "Example Usage"
  1868. ```python
  1869. client = get_client(url="http://localhost:2024)
  1870. async for chunk in client.runs.stream(
  1871. thread_id=None,
  1872. assistant_id="agent",
  1873. input={"messages": [{"role": "user", "content": "how are you?"}]},
  1874. stream_mode=["values","debug"],
  1875. metadata={"name":"my_run"},
  1876. context={"model_name": "anthropic"},
  1877. interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
  1878. interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
  1879. feedback_keys=["my_feedback_key_1","my_feedback_key_2"],
  1880. webhook="https://my.fake.webhook.com",
  1881. multitask_strategy="interrupt"
  1882. ):
  1883. print(chunk)
  1884. ```
  1885. ```shell
  1886. ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  1887. StreamPart(event='metadata', data={'run_id': '1ef4a9b8-d7da-679a-a45a-872054341df2'})
  1888. StreamPart(event='values', data={'messages': [{'content': 'how are you?', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'fe0a5778-cfe9-42ee-b807-0adaa1873c10', 'example': False}]})
  1889. StreamPart(event='values', data={'messages': [{'content': 'how are you?', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'fe0a5778-cfe9-42ee-b807-0adaa1873c10', 'example': False}, {'content': "I'm doing well, thanks for asking! I'm an AI assistant created by Anthropic to be helpful, honest, and harmless.", 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'ai', 'name': None, 'id': 'run-159b782c-b679-4830-83c6-cef87798fe8b', 'example': False, 'tool_calls': [], 'invalid_tool_calls': [], 'usage_metadata': None}]})
  1890. StreamPart(event='end', data=None)
  1891. ```
  1892. """
  1893. if checkpoint_during is not None:
  1894. warnings.warn(
  1895. "`checkpoint_during` is deprecated and will be removed in a future version. Use `durability` instead.",
  1896. DeprecationWarning,
  1897. stacklevel=2,
  1898. )
  1899. payload = {
  1900. "input": input,
  1901. "command": (
  1902. {k: v for k, v in command.items() if v is not None} if command else None
  1903. ),
  1904. "config": config,
  1905. "context": context,
  1906. "metadata": metadata,
  1907. "stream_mode": stream_mode,
  1908. "stream_subgraphs": stream_subgraphs,
  1909. "stream_resumable": stream_resumable,
  1910. "assistant_id": assistant_id,
  1911. "interrupt_before": interrupt_before,
  1912. "interrupt_after": interrupt_after,
  1913. "feedback_keys": feedback_keys,
  1914. "webhook": webhook,
  1915. "checkpoint": checkpoint,
  1916. "checkpoint_id": checkpoint_id,
  1917. "checkpoint_during": checkpoint_during,
  1918. "multitask_strategy": multitask_strategy,
  1919. "if_not_exists": if_not_exists,
  1920. "on_disconnect": on_disconnect,
  1921. "on_completion": on_completion,
  1922. "after_seconds": after_seconds,
  1923. "durability": durability,
  1924. }
  1925. endpoint = (
  1926. f"/threads/{thread_id}/runs/stream"
  1927. if thread_id is not None
  1928. else "/runs/stream"
  1929. )
  1930. def on_response(res: httpx.Response):
  1931. """Callback function to handle the response."""
  1932. if on_run_created and (metadata := _get_run_metadata_from_response(res)):
  1933. on_run_created(metadata)
  1934. return self.http.stream(
  1935. endpoint,
  1936. "POST",
  1937. json={k: v for k, v in payload.items() if v is not None},
  1938. params=params,
  1939. headers=headers,
  1940. on_response=on_response if on_run_created else None,
  1941. )
    # Overload: stateless run (`thread_id=None`). Checkpoint / multitask
    # options are unavailable; `on_completion` controls the temporary thread.
    @overload
    async def create(
        self,
        thread_id: None,
        assistant_id: str,
        *,
        input: Input | None = None,
        command: Command | None = None,
        stream_mode: StreamMode | Sequence[StreamMode] = "values",
        stream_subgraphs: bool = False,
        stream_resumable: bool = False,
        metadata: Mapping[str, Any] | None = None,
        checkpoint_during: bool | None = None,
        config: Config | None = None,
        context: Context | None = None,
        interrupt_before: All | Sequence[str] | None = None,
        interrupt_after: All | Sequence[str] | None = None,
        webhook: str | None = None,
        on_completion: OnCompletionBehavior | None = None,
        if_not_exists: IfNotExists | None = None,
        after_seconds: int | None = None,
        headers: Mapping[str, str] | None = None,
        params: QueryParamTypes | None = None,
        on_run_created: Callable[[RunCreateMetadata], None] | None = None,
    ) -> Run: ...
    # Overload: stateful run on an existing thread (`thread_id: str`).
    # Supports checkpoint resumption and a multitask strategy.
    @overload
    async def create(
        self,
        thread_id: str,
        assistant_id: str,
        *,
        input: Input | None = None,
        command: Command | None = None,
        stream_mode: StreamMode | Sequence[StreamMode] = "values",
        stream_subgraphs: bool = False,
        stream_resumable: bool = False,
        metadata: Mapping[str, Any] | None = None,
        config: Config | None = None,
        context: Context | None = None,
        checkpoint: Checkpoint | None = None,
        checkpoint_id: str | None = None,
        checkpoint_during: bool | None = None,
        interrupt_before: All | Sequence[str] | None = None,
        interrupt_after: All | Sequence[str] | None = None,
        webhook: str | None = None,
        multitask_strategy: MultitaskStrategy | None = None,
        if_not_exists: IfNotExists | None = None,
        after_seconds: int | None = None,
        headers: Mapping[str, str] | None = None,
        params: QueryParamTypes | None = None,
        on_run_created: Callable[[RunCreateMetadata], None] | None = None,
    ) -> Run: ...
  1994. async def create(
  1995. self,
  1996. thread_id: str | None,
  1997. assistant_id: str,
  1998. *,
  1999. input: Input | None = None,
  2000. command: Command | None = None,
  2001. stream_mode: StreamMode | Sequence[StreamMode] = "values",
  2002. stream_subgraphs: bool = False,
  2003. stream_resumable: bool = False,
  2004. metadata: Mapping[str, Any] | None = None,
  2005. config: Config | None = None,
  2006. context: Context | None = None,
  2007. checkpoint: Checkpoint | None = None,
  2008. checkpoint_id: str | None = None,
  2009. checkpoint_during: bool | None = None, # deprecated
  2010. interrupt_before: All | Sequence[str] | None = None,
  2011. interrupt_after: All | Sequence[str] | None = None,
  2012. webhook: str | None = None,
  2013. multitask_strategy: MultitaskStrategy | None = None,
  2014. if_not_exists: IfNotExists | None = None,
  2015. on_completion: OnCompletionBehavior | None = None,
  2016. after_seconds: int | None = None,
  2017. headers: Mapping[str, str] | None = None,
  2018. params: QueryParamTypes | None = None,
  2019. on_run_created: Callable[[RunCreateMetadata], None] | None = None,
  2020. durability: Durability | None = None,
  2021. ) -> Run:
  2022. """Create a background run.
  2023. Args:
  2024. thread_id: the thread ID to assign to the thread.
  2025. If `None` will create a stateless run.
  2026. assistant_id: The assistant ID or graph name to stream from.
  2027. If using graph name, will default to first assistant created from that graph.
  2028. input: The input to the graph.
  2029. command: A command to execute. Cannot be combined with input.
  2030. stream_mode: The stream mode(s) to use.
  2031. stream_subgraphs: Whether to stream output from subgraphs.
  2032. stream_resumable: Whether the stream is considered resumable.
  2033. If true, the stream can be resumed and replayed in its entirety even after disconnection.
  2034. metadata: Metadata to assign to the run.
  2035. config: The configuration for the assistant.
  2036. context: Static context to add to the assistant.
  2037. !!! version-added "Added in version 0.6.0"
  2038. checkpoint: The checkpoint to resume from.
  2039. checkpoint_during: (deprecated) Whether to checkpoint during the run (or only at the end/interruption).
  2040. interrupt_before: Nodes to interrupt immediately before they get executed.
  2041. interrupt_after: Nodes to Nodes to interrupt immediately after they get executed.
  2042. webhook: Webhook to call after LangGraph API call is done.
  2043. multitask_strategy: Multitask strategy to use.
  2044. Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
  2045. on_completion: Whether to delete or keep the thread created for a stateless run.
  2046. Must be one of 'delete' or 'keep'.
  2047. if_not_exists: How to handle missing thread. Defaults to 'reject'.
  2048. Must be either 'reject' (raise error if missing), or 'create' (create new thread).
  2049. after_seconds: The number of seconds to wait before starting the run.
  2050. Use to schedule future runs.
  2051. headers: Optional custom headers to include with the request.
  2052. on_run_created: Optional callback to call when a run is created.
  2053. durability: The durability to use for the run. Values are "sync", "async", or "exit".
  2054. "async" means checkpoints are persisted async while next graph step executes, replaces checkpoint_during=True
  2055. "sync" means checkpoints are persisted sync after graph step executes, replaces checkpoint_during=False
  2056. "exit" means checkpoints are only persisted when the run exits, does not save intermediate steps
  2057. Returns:
  2058. The created background run.
  2059. ???+ example "Example Usage"
  2060. ```python
  2061. background_run = await client.runs.create(
  2062. thread_id="my_thread_id",
  2063. assistant_id="my_assistant_id",
  2064. input={"messages": [{"role": "user", "content": "hello!"}]},
  2065. metadata={"name":"my_run"},
  2066. context={"model_name": "openai"},
  2067. interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
  2068. interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
  2069. webhook="https://my.fake.webhook.com",
  2070. multitask_strategy="interrupt"
  2071. )
  2072. print(background_run)
  2073. ```
  2074. ```shell
  2075. --------------------------------------------------------------------------------
  2076. {
  2077. 'run_id': 'my_run_id',
  2078. 'thread_id': 'my_thread_id',
  2079. 'assistant_id': 'my_assistant_id',
  2080. 'created_at': '2024-07-25T15:35:42.598503+00:00',
  2081. 'updated_at': '2024-07-25T15:35:42.598503+00:00',
  2082. 'metadata': {},
  2083. 'status': 'pending',
  2084. 'kwargs':
  2085. {
  2086. 'input':
  2087. {
  2088. 'messages': [
  2089. {
  2090. 'role': 'user',
  2091. 'content': 'how are you?'
  2092. }
  2093. ]
  2094. },
  2095. 'config':
  2096. {
  2097. 'metadata':
  2098. {
  2099. 'created_by': 'system'
  2100. },
  2101. 'configurable':
  2102. {
  2103. 'run_id': 'my_run_id',
  2104. 'user_id': None,
  2105. 'graph_id': 'agent',
  2106. 'thread_id': 'my_thread_id',
  2107. 'checkpoint_id': None,
  2108. 'assistant_id': 'my_assistant_id'
  2109. },
  2110. },
  2111. 'context':
  2112. {
  2113. 'model_name': 'openai'
  2114. }
  2115. 'webhook': "https://my.fake.webhook.com",
  2116. 'temporary': False,
  2117. 'stream_mode': ['values'],
  2118. 'feedback_keys': None,
  2119. 'interrupt_after': ["node_to_stop_after_1","node_to_stop_after_2"],
  2120. 'interrupt_before': ["node_to_stop_before_1","node_to_stop_before_2"]
  2121. },
  2122. 'multitask_strategy': 'interrupt'
  2123. }
  2124. ```
  2125. """
  2126. if checkpoint_during is not None:
  2127. warnings.warn(
  2128. "`checkpoint_during` is deprecated and will be removed in a future version. Use `durability` instead.",
  2129. DeprecationWarning,
  2130. stacklevel=2,
  2131. )
  2132. payload = {
  2133. "input": input,
  2134. "command": (
  2135. {k: v for k, v in command.items() if v is not None} if command else None
  2136. ),
  2137. "stream_mode": stream_mode,
  2138. "stream_subgraphs": stream_subgraphs,
  2139. "stream_resumable": stream_resumable,
  2140. "config": config,
  2141. "context": context,
  2142. "metadata": metadata,
  2143. "assistant_id": assistant_id,
  2144. "interrupt_before": interrupt_before,
  2145. "interrupt_after": interrupt_after,
  2146. "webhook": webhook,
  2147. "checkpoint": checkpoint,
  2148. "checkpoint_id": checkpoint_id,
  2149. "checkpoint_during": checkpoint_during,
  2150. "multitask_strategy": multitask_strategy,
  2151. "if_not_exists": if_not_exists,
  2152. "on_completion": on_completion,
  2153. "after_seconds": after_seconds,
  2154. "durability": durability,
  2155. }
  2156. payload = {k: v for k, v in payload.items() if v is not None}
  2157. def on_response(res: httpx.Response):
  2158. """Callback function to handle the response."""
  2159. if on_run_created and (metadata := _get_run_metadata_from_response(res)):
  2160. on_run_created(metadata)
  2161. return await self.http.post(
  2162. f"/threads/{thread_id}/runs" if thread_id else "/runs",
  2163. json=payload,
  2164. params=params,
  2165. headers=headers,
  2166. on_response=on_response if on_run_created else None,
  2167. )
  2168. async def create_batch(
  2169. self,
  2170. payloads: list[RunCreate],
  2171. *,
  2172. headers: Mapping[str, str] | None = None,
  2173. params: QueryParamTypes | None = None,
  2174. ) -> list[Run]:
  2175. """Create a batch of stateless background runs."""
  2176. def filter_payload(payload: RunCreate):
  2177. return {k: v for k, v in payload.items() if v is not None}
  2178. filtered = [filter_payload(payload) for payload in payloads]
  2179. return await self.http.post(
  2180. "/runs/batch", json=filtered, headers=headers, params=params
  2181. )
    # Overload: stateful wait on an existing thread (`thread_id: str`).
    # Supports checkpoint resumption and a multitask strategy.
    @overload
    async def wait(
        self,
        thread_id: str,
        assistant_id: str,
        *,
        input: Input | None = None,
        command: Command | None = None,
        metadata: Mapping[str, Any] | None = None,
        config: Config | None = None,
        context: Context | None = None,
        checkpoint: Checkpoint | None = None,
        checkpoint_id: str | None = None,
        checkpoint_during: bool | None = None,
        interrupt_before: All | Sequence[str] | None = None,
        interrupt_after: All | Sequence[str] | None = None,
        webhook: str | None = None,
        on_disconnect: DisconnectMode | None = None,
        multitask_strategy: MultitaskStrategy | None = None,
        if_not_exists: IfNotExists | None = None,
        after_seconds: int | None = None,
        raise_error: bool = True,
        headers: Mapping[str, str] | None = None,
        params: QueryParamTypes | None = None,
        on_run_created: Callable[[RunCreateMetadata], None] | None = None,
    ) -> list[dict] | dict[str, Any]: ...
    # Overload: stateless wait (`thread_id=None`). Checkpoint / multitask
    # options are unavailable; `on_completion` controls the temporary thread.
    @overload
    async def wait(
        self,
        thread_id: None,
        assistant_id: str,
        *,
        input: Input | None = None,
        command: Command | None = None,
        metadata: Mapping[str, Any] | None = None,
        config: Config | None = None,
        context: Context | None = None,
        checkpoint_during: bool | None = None,
        interrupt_before: All | Sequence[str] | None = None,
        interrupt_after: All | Sequence[str] | None = None,
        webhook: str | None = None,
        on_disconnect: DisconnectMode | None = None,
        on_completion: OnCompletionBehavior | None = None,
        if_not_exists: IfNotExists | None = None,
        after_seconds: int | None = None,
        raise_error: bool = True,
        headers: Mapping[str, str] | None = None,
        params: QueryParamTypes | None = None,
        on_run_created: Callable[[RunCreateMetadata], None] | None = None,
    ) -> list[dict] | dict[str, Any]: ...
  2232. async def wait(
  2233. self,
  2234. thread_id: str | None,
  2235. assistant_id: str,
  2236. *,
  2237. input: Input | None = None,
  2238. command: Command | None = None,
  2239. metadata: Mapping[str, Any] | None = None,
  2240. config: Config | None = None,
  2241. context: Context | None = None,
  2242. checkpoint: Checkpoint | None = None,
  2243. checkpoint_id: str | None = None,
  2244. checkpoint_during: bool | None = None, # deprecated
  2245. interrupt_before: All | Sequence[str] | None = None,
  2246. interrupt_after: All | Sequence[str] | None = None,
  2247. webhook: str | None = None,
  2248. on_disconnect: DisconnectMode | None = None,
  2249. on_completion: OnCompletionBehavior | None = None,
  2250. multitask_strategy: MultitaskStrategy | None = None,
  2251. if_not_exists: IfNotExists | None = None,
  2252. after_seconds: int | None = None,
  2253. raise_error: bool = True,
  2254. headers: Mapping[str, str] | None = None,
  2255. params: QueryParamTypes | None = None,
  2256. on_run_created: Callable[[RunCreateMetadata], None] | None = None,
  2257. durability: Durability | None = None,
  2258. ) -> list[dict] | dict[str, Any]:
  2259. """Create a run, wait until it finishes and return the final state.
  2260. Args:
  2261. thread_id: the thread ID to create the run on.
  2262. If `None` will create a stateless run.
  2263. assistant_id: The assistant ID or graph name to run.
  2264. If using graph name, will default to first assistant created from that graph.
  2265. input: The input to the graph.
  2266. command: A command to execute. Cannot be combined with input.
  2267. metadata: Metadata to assign to the run.
  2268. config: The configuration for the assistant.
  2269. context: Static context to add to the assistant.
  2270. !!! version-added "Added in version 0.6.0"
  2271. checkpoint: The checkpoint to resume from.
  2272. checkpoint_during: (deprecated) Whether to checkpoint during the run (or only at the end/interruption).
  2273. interrupt_before: Nodes to interrupt immediately before they get executed.
  2274. interrupt_after: Nodes to Nodes to interrupt immediately after they get executed.
  2275. webhook: Webhook to call after LangGraph API call is done.
  2276. on_disconnect: The disconnect mode to use.
  2277. Must be one of 'cancel' or 'continue'.
  2278. on_completion: Whether to delete or keep the thread created for a stateless run.
  2279. Must be one of 'delete' or 'keep'.
  2280. multitask_strategy: Multitask strategy to use.
  2281. Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
  2282. if_not_exists: How to handle missing thread. Defaults to 'reject'.
  2283. Must be either 'reject' (raise error if missing), or 'create' (create new thread).
  2284. after_seconds: The number of seconds to wait before starting the run.
  2285. Use to schedule future runs.
  2286. headers: Optional custom headers to include with the request.
  2287. on_run_created: Optional callback to call when a run is created.
  2288. durability: The durability to use for the run. Values are "sync", "async", or "exit".
  2289. "async" means checkpoints are persisted async while next graph step executes, replaces checkpoint_during=True
  2290. "sync" means checkpoints are persisted sync after graph step executes, replaces checkpoint_during=False
  2291. "exit" means checkpoints are only persisted when the run exits, does not save intermediate steps
  2292. Returns:
  2293. The output of the run.
  2294. ???+ example "Example Usage"
  2295. ```python
  2296. client = get_client(url="http://localhost:2024")
  2297. final_state_of_run = await client.runs.wait(
  2298. thread_id=None,
  2299. assistant_id="agent",
  2300. input={"messages": [{"role": "user", "content": "how are you?"}]},
  2301. metadata={"name":"my_run"},
  2302. context={"model_name": "anthropic"},
  2303. interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
  2304. interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
  2305. webhook="https://my.fake.webhook.com",
  2306. multitask_strategy="interrupt"
  2307. )
  2308. print(final_state_of_run)
  2309. ```
  2310. ```shell
  2311. -------------------------------------------------------------------------------------------------------------------------------------------
  2312. {
  2313. 'messages': [
  2314. {
  2315. 'content': 'how are you?',
  2316. 'additional_kwargs': {},
  2317. 'response_metadata': {},
  2318. 'type': 'human',
  2319. 'name': None,
  2320. 'id': 'f51a862c-62fe-4866-863b-b0863e8ad78a',
  2321. 'example': False
  2322. },
  2323. {
  2324. 'content': "I'm doing well, thanks for asking! I'm an AI assistant created by Anthropic to be helpful, honest, and harmless.",
  2325. 'additional_kwargs': {},
  2326. 'response_metadata': {},
  2327. 'type': 'ai',
  2328. 'name': None,
  2329. 'id': 'run-bf1cd3c6-768f-4c16-b62d-ba6f17ad8b36',
  2330. 'example': False,
  2331. 'tool_calls': [],
  2332. 'invalid_tool_calls': [],
  2333. 'usage_metadata': None
  2334. }
  2335. ]
  2336. }
  2337. ```
  2338. """
  2339. if checkpoint_during is not None:
  2340. warnings.warn(
  2341. "`checkpoint_during` is deprecated and will be removed in a future version. Use `durability` instead.",
  2342. DeprecationWarning,
  2343. stacklevel=2,
  2344. )
  2345. payload = {
  2346. "input": input,
  2347. "command": (
  2348. {k: v for k, v in command.items() if v is not None} if command else None
  2349. ),
  2350. "config": config,
  2351. "context": context,
  2352. "metadata": metadata,
  2353. "assistant_id": assistant_id,
  2354. "interrupt_before": interrupt_before,
  2355. "interrupt_after": interrupt_after,
  2356. "webhook": webhook,
  2357. "checkpoint": checkpoint,
  2358. "checkpoint_id": checkpoint_id,
  2359. "multitask_strategy": multitask_strategy,
  2360. "checkpoint_during": checkpoint_during,
  2361. "if_not_exists": if_not_exists,
  2362. "on_disconnect": on_disconnect,
  2363. "on_completion": on_completion,
  2364. "after_seconds": after_seconds,
  2365. "durability": durability,
  2366. }
  2367. endpoint = (
  2368. f"/threads/{thread_id}/runs/wait" if thread_id is not None else "/runs/wait"
  2369. )
  2370. def on_response(res: httpx.Response):
  2371. """Callback function to handle the response."""
  2372. if on_run_created and (metadata := _get_run_metadata_from_response(res)):
  2373. on_run_created(metadata)
  2374. response = await self.http.request_reconnect(
  2375. endpoint,
  2376. "POST",
  2377. json={k: v for k, v in payload.items() if v is not None},
  2378. params=params,
  2379. headers=headers,
  2380. on_response=on_response if on_run_created else None,
  2381. )
  2382. if (
  2383. raise_error
  2384. and isinstance(response, dict)
  2385. and "__error__" in response
  2386. and isinstance(response["__error__"], dict)
  2387. ):
  2388. raise Exception(
  2389. f"{response['__error__'].get('error')}: {response['__error__'].get('message')}"
  2390. )
  2391. return response
  2392. async def list(
  2393. self,
  2394. thread_id: str,
  2395. *,
  2396. limit: int = 10,
  2397. offset: int = 0,
  2398. status: RunStatus | None = None,
  2399. select: list[RunSelectField] | None = None,
  2400. headers: Mapping[str, str] | None = None,
  2401. params: QueryParamTypes | None = None,
  2402. ) -> list[Run]:
  2403. """List runs.
  2404. Args:
  2405. thread_id: The thread ID to list runs for.
  2406. limit: The maximum number of results to return.
  2407. offset: The number of results to skip.
  2408. status: The status of the run to filter by.
  2409. headers: Optional custom headers to include with the request.
  2410. params: Optional query parameters to include with the request.
  2411. Returns:
  2412. The runs for the thread.
  2413. ???+ example "Example Usage"
  2414. ```python
  2415. client = get_client(url="http://localhost:2024")
  2416. await client.runs.list(
  2417. thread_id="thread_id",
  2418. limit=5,
  2419. offset=5,
  2420. )
  2421. ```
  2422. """
  2423. query_params: dict[str, Any] = {
  2424. "limit": limit,
  2425. "offset": offset,
  2426. }
  2427. if status is not None:
  2428. query_params["status"] = status
  2429. if select:
  2430. query_params["select"] = select
  2431. if params:
  2432. query_params.update(params)
  2433. return await self.http.get(
  2434. f"/threads/{thread_id}/runs", params=query_params, headers=headers
  2435. )
  2436. async def get(
  2437. self,
  2438. thread_id: str,
  2439. run_id: str,
  2440. *,
  2441. headers: Mapping[str, str] | None = None,
  2442. params: QueryParamTypes | None = None,
  2443. ) -> Run:
  2444. """Get a run.
  2445. Args:
  2446. thread_id: The thread ID to get.
  2447. run_id: The run ID to get.
  2448. headers: Optional custom headers to include with the request.
  2449. params: Optional query parameters to include with the request.
  2450. Returns:
  2451. `Run` object.
  2452. ???+ example "Example Usage"
  2453. ```python
  2454. client = get_client(url="http://localhost:2024")
  2455. run = await client.runs.get(
  2456. thread_id="thread_id_to_delete",
  2457. run_id="run_id_to_delete",
  2458. )
  2459. ```
  2460. """
  2461. return await self.http.get(
  2462. f"/threads/{thread_id}/runs/{run_id}", headers=headers, params=params
  2463. )
  2464. async def cancel(
  2465. self,
  2466. thread_id: str,
  2467. run_id: str,
  2468. *,
  2469. wait: bool = False,
  2470. action: CancelAction = "interrupt",
  2471. headers: Mapping[str, str] | None = None,
  2472. params: QueryParamTypes | None = None,
  2473. ) -> None:
  2474. """Get a run.
  2475. Args:
  2476. thread_id: The thread ID to cancel.
  2477. run_id: The run ID to cancel.
  2478. wait: Whether to wait until run has completed.
  2479. action: Action to take when cancelling the run. Possible values
  2480. are `interrupt` or `rollback`. Default is `interrupt`.
  2481. headers: Optional custom headers to include with the request.
  2482. params: Optional query parameters to include with the request.
  2483. Returns:
  2484. `None`
  2485. ???+ example "Example Usage"
  2486. ```python
  2487. client = get_client(url="http://localhost:2024")
  2488. await client.runs.cancel(
  2489. thread_id="thread_id_to_cancel",
  2490. run_id="run_id_to_cancel",
  2491. wait=True,
  2492. action="interrupt"
  2493. )
  2494. ```
  2495. """
  2496. query_params = {
  2497. "wait": 1 if wait else 0,
  2498. "action": action,
  2499. }
  2500. if params:
  2501. query_params.update(params)
  2502. if wait:
  2503. return await self.http.request_reconnect(
  2504. f"/threads/{thread_id}/runs/{run_id}/cancel",
  2505. "POST",
  2506. params=query_params,
  2507. headers=headers,
  2508. )
  2509. else:
  2510. return await self.http.post(
  2511. f"/threads/{thread_id}/runs/{run_id}/cancel",
  2512. json=None,
  2513. params=query_params,
  2514. headers=headers,
  2515. )
  2516. async def join(
  2517. self,
  2518. thread_id: str,
  2519. run_id: str,
  2520. *,
  2521. headers: Mapping[str, str] | None = None,
  2522. params: QueryParamTypes | None = None,
  2523. ) -> dict:
  2524. """Block until a run is done. Returns the final state of the thread.
  2525. Args:
  2526. thread_id: The thread ID to join.
  2527. run_id: The run ID to join.
  2528. headers: Optional custom headers to include with the request.
  2529. params: Optional query parameters to include with the request.
  2530. Returns:
  2531. `None`
  2532. ???+ example "Example Usage"
  2533. ```python
  2534. client = get_client(url="http://localhost:2024")
  2535. result =await client.runs.join(
  2536. thread_id="thread_id_to_join",
  2537. run_id="run_id_to_join"
  2538. )
  2539. ```
  2540. """
  2541. return await self.http.request_reconnect(
  2542. f"/threads/{thread_id}/runs/{run_id}/join",
  2543. "GET",
  2544. headers=headers,
  2545. params=params,
  2546. )
  2547. def join_stream(
  2548. self,
  2549. thread_id: str,
  2550. run_id: str,
  2551. *,
  2552. cancel_on_disconnect: bool = False,
  2553. stream_mode: StreamMode | Sequence[StreamMode] | None = None,
  2554. headers: Mapping[str, str] | None = None,
  2555. params: QueryParamTypes | None = None,
  2556. last_event_id: str | None = None,
  2557. ) -> AsyncIterator[StreamPart]:
  2558. """Stream output from a run in real-time, until the run is done.
  2559. Output is not buffered, so any output produced before this call will
  2560. not be received here.
  2561. Args:
  2562. thread_id: The thread ID to join.
  2563. run_id: The run ID to join.
  2564. cancel_on_disconnect: Whether to cancel the run when the stream is disconnected.
  2565. stream_mode: The stream mode(s) to use. Must be a subset of the stream modes passed
  2566. when creating the run. Background runs default to having the union of all
  2567. stream modes.
  2568. headers: Optional custom headers to include with the request.
  2569. params: Optional query parameters to include with the request.
  2570. last_event_id: The last event ID to use for the stream.
  2571. Returns:
  2572. The stream of parts.
  2573. ???+ example "Example Usage"
  2574. ```python
  2575. client = get_client(url="http://localhost:2024")
  2576. async for part in client.runs.join_stream(
  2577. thread_id="thread_id_to_join",
  2578. run_id="run_id_to_join",
  2579. stream_mode=["values", "debug"]
  2580. ):
  2581. print(part)
  2582. ```
  2583. """
  2584. query_params = {
  2585. "cancel_on_disconnect": cancel_on_disconnect,
  2586. "stream_mode": stream_mode,
  2587. }
  2588. if params:
  2589. query_params.update(params)
  2590. return self.http.stream(
  2591. f"/threads/{thread_id}/runs/{run_id}/stream",
  2592. "GET",
  2593. params=query_params,
  2594. headers={
  2595. **({"Last-Event-ID": last_event_id} if last_event_id else {}),
  2596. **(headers or {}),
  2597. }
  2598. or None,
  2599. )
  2600. async def delete(
  2601. self,
  2602. thread_id: str,
  2603. run_id: str,
  2604. *,
  2605. headers: Mapping[str, str] | None = None,
  2606. params: QueryParamTypes | None = None,
  2607. ) -> None:
  2608. """Delete a run.
  2609. Args:
  2610. thread_id: The thread ID to delete.
  2611. run_id: The run ID to delete.
  2612. headers: Optional custom headers to include with the request.
  2613. params: Optional query parameters to include with the request.
  2614. Returns:
  2615. `None`
  2616. ???+ example "Example Usage"
  2617. ```python
  2618. client = get_client(url="http://localhost:2024")
  2619. await client.runs.delete(
  2620. thread_id="thread_id_to_delete",
  2621. run_id="run_id_to_delete"
  2622. )
  2623. ```
  2624. """
  2625. await self.http.delete(
  2626. f"/threads/{thread_id}/runs/{run_id}", headers=headers, params=params
  2627. )
  2628. class CronClient:
  2629. """Client for managing recurrent runs (cron jobs) in LangGraph.
  2630. A run is a single invocation of an assistant with optional input, config, and context.
  2631. This client allows scheduling recurring runs to occur automatically.
  2632. ???+ example "Example Usage"
  2633. ```python
client = get_client(url="http://localhost:2024")
  2635. cron_job = await client.crons.create_for_thread(
  2636. thread_id="thread_123",
  2637. assistant_id="asst_456",
  2638. schedule="0 9 * * *",
  2639. input={"message": "Daily update"}
  2640. )
  2641. ```
  2642. !!! note "Feature Availability"
  2643. The crons client functionality is not supported on all licenses.
  2644. Please check the relevant license documentation for the most up-to-date
  2645. details on feature availability.
  2646. """
    def __init__(self, http_client: HttpClient) -> None:
        """Store the shared HTTP client used for all cron API calls."""
        self.http = http_client
  2649. async def create_for_thread(
  2650. self,
  2651. thread_id: str,
  2652. assistant_id: str,
  2653. *,
  2654. schedule: str,
  2655. input: Input | None = None,
  2656. metadata: Mapping[str, Any] | None = None,
  2657. config: Config | None = None,
  2658. context: Context | None = None,
  2659. checkpoint_during: bool | None = None,
  2660. interrupt_before: All | list[str] | None = None,
  2661. interrupt_after: All | list[str] | None = None,
  2662. webhook: str | None = None,
  2663. multitask_strategy: str | None = None,
  2664. headers: Mapping[str, str] | None = None,
  2665. params: QueryParamTypes | None = None,
  2666. ) -> Run:
  2667. """Create a cron job for a thread.
  2668. Args:
  2669. thread_id: the thread ID to run the cron job on.
  2670. assistant_id: The assistant ID or graph name to use for the cron job.
  2671. If using graph name, will default to first assistant created from that graph.
  2672. schedule: The cron schedule to execute this job on.
  2673. input: The input to the graph.
  2674. metadata: Metadata to assign to the cron job runs.
  2675. config: The configuration for the assistant.
  2676. context: Static context to add to the assistant.
  2677. !!! version-added "Added in version 0.6.0"
  2678. checkpoint_during: Whether to checkpoint during the run (or only at the end/interruption).
  2679. interrupt_before: Nodes to interrupt immediately before they get executed.
  2680. interrupt_after: Nodes to Nodes to interrupt immediately after they get executed.
  2681. webhook: Webhook to call after LangGraph API call is done.
  2682. multitask_strategy: Multitask strategy to use.
  2683. Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
  2684. headers: Optional custom headers to include with the request.
  2685. params: Optional query parameters to include with the request.
  2686. Returns:
  2687. The cron run.
  2688. ???+ example "Example Usage"
  2689. ```python
  2690. client = get_client(url="http://localhost:2024")
  2691. cron_run = await client.crons.create_for_thread(
  2692. thread_id="my-thread-id",
  2693. assistant_id="agent",
  2694. schedule="27 15 * * *",
  2695. input={"messages": [{"role": "user", "content": "hello!"}]},
  2696. metadata={"name":"my_run"},
  2697. context={"model_name": "openai"},
  2698. interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
  2699. interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
  2700. webhook="https://my.fake.webhook.com",
  2701. multitask_strategy="interrupt"
  2702. )
  2703. ```
  2704. """
  2705. payload = {
  2706. "schedule": schedule,
  2707. "input": input,
  2708. "config": config,
  2709. "metadata": metadata,
  2710. "context": context,
  2711. "assistant_id": assistant_id,
  2712. "checkpoint_during": checkpoint_during,
  2713. "interrupt_before": interrupt_before,
  2714. "interrupt_after": interrupt_after,
  2715. "webhook": webhook,
  2716. }
  2717. if multitask_strategy:
  2718. payload["multitask_strategy"] = multitask_strategy
  2719. payload = {k: v for k, v in payload.items() if v is not None}
  2720. return await self.http.post(
  2721. f"/threads/{thread_id}/runs/crons",
  2722. json=payload,
  2723. headers=headers,
  2724. params=params,
  2725. )
    async def create(
        self,
        assistant_id: str,
        *,
        schedule: str,
        input: Input | None = None,
        metadata: Mapping[str, Any] | None = None,
        config: Config | None = None,
        context: Context | None = None,
        checkpoint_during: bool | None = None,
        interrupt_before: All | list[str] | None = None,
        interrupt_after: All | list[str] | None = None,
        webhook: str | None = None,
        multitask_strategy: str | None = None,
        headers: Mapping[str, str] | None = None,
        params: QueryParamTypes | None = None,
    ) -> Run:
        """Create a cron run.

        Args:
            assistant_id: The assistant ID or graph name to use for the cron job.
                If using graph name, will default to first assistant created from that graph.
            schedule: The cron schedule to execute this job on.
            input: The input to the graph.
            metadata: Metadata to assign to the cron job runs.
            config: The configuration for the assistant.
            context: Static context to add to the assistant.

                !!! version-added "Added in version 0.6.0"
            checkpoint_during: Whether to checkpoint during the run (or only at the end/interruption).
            interrupt_before: Nodes to interrupt immediately before they get executed.
            interrupt_after: Nodes to interrupt immediately after they get executed.
            webhook: Webhook to call after LangGraph API call is done.
            multitask_strategy: Multitask strategy to use.
                Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
            headers: Optional custom headers to include with the request.
            params: Optional query parameters to include with the request.

        Returns:
            The cron run.

        ???+ example "Example Usage"

            ```python
            client = get_client(url="http://localhost:2024")
            cron_run = await client.crons.create(
                assistant_id="agent",
                schedule="27 15 * * *",
                input={"messages": [{"role": "user", "content": "hello!"}]},
                metadata={"name":"my_run"},
                context={"model_name": "openai"},
                interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
                interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
                webhook="https://my.fake.webhook.com",
                multitask_strategy="interrupt"
            )
            ```
        """
        # Assemble the request body; None-valued entries are stripped below so
        # the server applies its own defaults for anything not explicitly set.
        payload = {
            "schedule": schedule,
            "input": input,
            "config": config,
            "metadata": metadata,
            "context": context,
            "assistant_id": assistant_id,
            "checkpoint_during": checkpoint_during,
            "interrupt_before": interrupt_before,
            "interrupt_after": interrupt_after,
            "webhook": webhook,
        }
        # A falsy multitask_strategy (None or empty string) is treated as unset.
        if multitask_strategy:
            payload["multitask_strategy"] = multitask_strategy
        payload = {k: v for k, v in payload.items() if v is not None}
        return await self.http.post(
            "/runs/crons", json=payload, headers=headers, params=params
        )
  2797. async def delete(
  2798. self,
  2799. cron_id: str,
  2800. *,
  2801. headers: Mapping[str, str] | None = None,
  2802. params: QueryParamTypes | None = None,
  2803. ) -> None:
  2804. """Delete a cron.
  2805. Args:
  2806. cron_id: The cron ID to delete.
  2807. headers: Optional custom headers to include with the request.
  2808. params: Optional query parameters to include with the request.
  2809. Returns:
  2810. `None`
  2811. ???+ example "Example Usage"
  2812. ```python
  2813. client = get_client(url="http://localhost:2024")
  2814. await client.crons.delete(
  2815. cron_id="cron_to_delete"
  2816. )
  2817. ```
  2818. """
  2819. await self.http.delete(f"/runs/crons/{cron_id}", headers=headers, params=params)
  2820. async def search(
  2821. self,
  2822. *,
  2823. assistant_id: str | None = None,
  2824. thread_id: str | None = None,
  2825. limit: int = 10,
  2826. offset: int = 0,
  2827. sort_by: CronSortBy | None = None,
  2828. sort_order: SortOrder | None = None,
  2829. select: list[CronSelectField] | None = None,
  2830. headers: Mapping[str, str] | None = None,
  2831. params: QueryParamTypes | None = None,
  2832. ) -> list[Cron]:
  2833. """Get a list of cron jobs.
  2834. Args:
  2835. assistant_id: The assistant ID or graph name to search for.
  2836. thread_id: the thread ID to search for.
  2837. limit: The maximum number of results to return.
  2838. offset: The number of results to skip.
  2839. headers: Optional custom headers to include with the request.
  2840. params: Optional query parameters to include with the request.
  2841. Returns:
  2842. The list of cron jobs returned by the search,
  2843. ???+ example "Example Usage"
  2844. ```python
  2845. client = get_client(url="http://localhost:2024")
  2846. cron_jobs = await client.crons.search(
  2847. assistant_id="my_assistant_id",
  2848. thread_id="my_thread_id",
  2849. limit=5,
  2850. offset=5,
  2851. )
  2852. print(cron_jobs)
  2853. ```
  2854. ```shell
  2855. ----------------------------------------------------------
  2856. [
  2857. {
  2858. 'cron_id': '1ef3cefa-4c09-6926-96d0-3dc97fd5e39b',
  2859. 'assistant_id': 'my_assistant_id',
  2860. 'thread_id': 'my_thread_id',
  2861. 'user_id': None,
  2862. 'payload':
  2863. {
  2864. 'input': {'start_time': ''},
  2865. 'schedule': '4 * * * *',
  2866. 'assistant_id': 'my_assistant_id'
  2867. },
  2868. 'schedule': '4 * * * *',
  2869. 'next_run_date': '2024-07-25T17:04:00+00:00',
  2870. 'end_time': None,
  2871. 'created_at': '2024-07-08T06:02:23.073257+00:00',
  2872. 'updated_at': '2024-07-08T06:02:23.073257+00:00'
  2873. }
  2874. ]
  2875. ```
  2876. """
  2877. payload = {
  2878. "assistant_id": assistant_id,
  2879. "thread_id": thread_id,
  2880. "limit": limit,
  2881. "offset": offset,
  2882. }
  2883. if sort_by:
  2884. payload["sort_by"] = sort_by
  2885. if sort_order:
  2886. payload["sort_order"] = sort_order
  2887. if select:
  2888. payload["select"] = select
  2889. payload = {k: v for k, v in payload.items() if v is not None}
  2890. return await self.http.post(
  2891. "/runs/crons/search", json=payload, headers=headers, params=params
  2892. )
  2893. async def count(
  2894. self,
  2895. *,
  2896. assistant_id: str | None = None,
  2897. thread_id: str | None = None,
  2898. headers: Mapping[str, str] | None = None,
  2899. params: QueryParamTypes | None = None,
  2900. ) -> int:
  2901. """Count cron jobs matching filters.
  2902. Args:
  2903. assistant_id: Assistant ID to filter by.
  2904. thread_id: Thread ID to filter by.
  2905. headers: Optional custom headers to include with the request.
  2906. params: Optional query parameters to include with the request.
  2907. Returns:
  2908. int: Number of crons matching the criteria.
  2909. """
  2910. payload: dict[str, Any] = {}
  2911. if assistant_id:
  2912. payload["assistant_id"] = assistant_id
  2913. if thread_id:
  2914. payload["thread_id"] = thread_id
  2915. return await self.http.post(
  2916. "/runs/crons/count", json=payload, headers=headers, params=params
  2917. )
class StoreClient:
    """Client for interacting with the graph's shared storage.

    The Store provides a key-value storage system for persisting data across graph executions,
    allowing for stateful operations and data sharing across threads.

    ???+ example "Example"

        ```python
        client = get_client(url="http://localhost:2024")
        await client.store.put_item(["users", "user123"], "mem-123451342", {"name": "Alice", "score": 100})
        ```
    """

    def __init__(self, http: HttpClient) -> None:
        # Shared async HTTP transport owned by the top-level client.
        self.http = http

    async def put_item(
        self,
        namespace: Sequence[str],
        /,
        key: str,
        value: Mapping[str, Any],
        index: Literal[False] | list[str] | None = None,
        ttl: int | None = None,
        headers: Mapping[str, str] | None = None,
        params: QueryParamTypes | None = None,
    ) -> None:
        """Store or update an item.

        Args:
            namespace: A list of strings representing the namespace path.
            key: The unique identifier for the item within the namespace.
            value: A dictionary containing the item's data.
            index: Controls search indexing - None (use defaults), False (disable), or list of field paths to index.
            ttl: Optional time-to-live in minutes for the item, or None for no expiration.
            headers: Optional custom headers to include with the request.
            params: Optional query parameters to include with the request.

        Returns:
            `None`

        Raises:
            ValueError: If any namespace label contains a period.

        ???+ example "Example Usage"

            ```python
            client = get_client(url="http://localhost:2024")
            await client.store.put_item(
                ["documents", "user123"],
                key="item456",
                value={"title": "My Document", "content": "Hello World"}
            )
            ```
        """
        # Namespaces are dot-joined elsewhere (see get_item), so a period
        # inside a single label would be ambiguous — reject it up front.
        for label in namespace:
            if "." in label:
                raise ValueError(
                    f"Invalid namespace label '{label}'. Namespace labels cannot contain periods ('.')."
                )
        payload = {
            "namespace": namespace,
            "key": key,
            "value": value,
            "index": index,
            "ttl": ttl,
        }
        # _provided_vals drops None-valued keys so server defaults apply.
        await self.http.put(
            "/store/items", json=_provided_vals(payload), headers=headers, params=params
        )

    async def get_item(
        self,
        namespace: Sequence[str],
        /,
        key: str,
        *,
        refresh_ttl: bool | None = None,
        headers: Mapping[str, str] | None = None,
        params: QueryParamTypes | None = None,
    ) -> Item:
        """Retrieve a single item.

        Args:
            namespace: A list of strings representing the namespace path.
            key: The unique identifier for the item.
            refresh_ttl: Whether to refresh the TTL on this read operation. If `None`, uses the store's default behavior.
            headers: Optional custom headers to include with the request.
            params: Optional query parameters to include with the request.

        Returns:
            Item: The retrieved item.

        Raises:
            ValueError: If any namespace label contains a period.

        ???+ example "Example Usage"

            ```python
            client = get_client(url="http://localhost:2024")
            item = await client.store.get_item(
                ["documents", "user123"],
                key="item456",
            )
            print(item)
            ```

            ```shell
            ----------------------------------------------------------------
            {
                'namespace': ['documents', 'user123'],
                'key': 'item456',
                'value': {'title': 'My Document', 'content': 'Hello World'},
                'created_at': '2024-07-30T12:00:00Z',
                'updated_at': '2024-07-30T12:00:00Z'
            }
            ```
        """
        # Validate before building the query string: the namespace is sent
        # dot-joined, so labels containing '.' could not be parsed back.
        for label in namespace:
            if "." in label:
                raise ValueError(
                    f"Invalid namespace label '{label}'. Namespace labels cannot contain periods ('.')."
                )
        get_params = {"namespace": ".".join(namespace), "key": key}
        if refresh_ttl is not None:
            get_params["refresh_ttl"] = refresh_ttl
        # NOTE(review): this merge assumes `params` is a mapping; other
        # methods pass QueryParamTypes straight through — confirm callers
        # only provide dict-like params here.
        if params:
            get_params = {**get_params, **params}
        return await self.http.get("/store/items", params=get_params, headers=headers)

    async def delete_item(
        self,
        namespace: Sequence[str],
        /,
        key: str,
        headers: Mapping[str, str] | None = None,
        params: QueryParamTypes | None = None,
    ) -> None:
        """Delete an item.

        Args:
            namespace: A list of strings representing the namespace path.
            key: The unique identifier for the item.
            headers: Optional custom headers to include with the request.
            params: Optional query parameters to include with the request.

        Returns:
            `None`

        ???+ example "Example Usage"

            ```python
            client = get_client(url="http://localhost:2024")
            await client.store.delete_item(
                ["documents", "user123"],
                key="item456",
            )
            ```
        """
        # NOTE(review): unlike put_item/get_item, no period check is done on
        # namespace labels here; the namespace travels as a JSON list so a
        # period is not ambiguous on the wire — confirm this asymmetry is
        # intentional.
        await self.http.delete(
            "/store/items",
            json={"namespace": namespace, "key": key},
            headers=headers,
            params=params,
        )

    async def search_items(
        self,
        namespace_prefix: Sequence[str],
        /,
        filter: Mapping[str, Any] | None = None,
        limit: int = 10,
        offset: int = 0,
        query: str | None = None,
        refresh_ttl: bool | None = None,
        headers: Mapping[str, str] | None = None,
        params: QueryParamTypes | None = None,
    ) -> SearchItemsResponse:
        """Search for items within a namespace prefix.

        Args:
            namespace_prefix: List of strings representing the namespace prefix.
            filter: Optional dictionary of key-value pairs to filter results.
            limit: Maximum number of items to return (default is 10).
            offset: Number of items to skip before returning results (default is 0).
            query: Optional query for natural language search.
            refresh_ttl: Whether to refresh the TTL on items returned by this search. If `None`, uses the store's default behavior.
            headers: Optional custom headers to include with the request.
            params: Optional query parameters to include with the request.

        Returns:
            A list of items matching the search criteria.

        ???+ example "Example Usage"

            ```python
            client = get_client(url="http://localhost:2024")
            items = await client.store.search_items(
                ["documents"],
                filter={"author": "John Doe"},
                limit=5,
                offset=0
            )
            print(items)
            ```

            ```shell
            ----------------------------------------------------------------
            {
                "items": [
                    {
                        "namespace": ["documents", "user123"],
                        "key": "item789",
                        "value": {
                            "title": "Another Document",
                            "author": "John Doe"
                        },
                        "created_at": "2024-07-30T12:00:00Z",
                        "updated_at": "2024-07-30T12:00:00Z"
                    },
                    # ... additional items ...
                ]
            }
            ```
        """
        payload = {
            "namespace_prefix": namespace_prefix,
            "filter": filter,
            "limit": limit,
            "offset": offset,
            "query": query,
            "refresh_ttl": refresh_ttl,
        }
        # None-valued options are stripped so the server applies its defaults.
        return await self.http.post(
            "/store/items/search",
            json=_provided_vals(payload),
            headers=headers,
            params=params,
        )

    async def list_namespaces(
        self,
        prefix: list[str] | None = None,
        suffix: list[str] | None = None,
        max_depth: int | None = None,
        limit: int = 100,
        offset: int = 0,
        headers: Mapping[str, str] | None = None,
        params: QueryParamTypes | None = None,
    ) -> ListNamespaceResponse:
        """List namespaces with optional match conditions.

        Args:
            prefix: Optional list of strings representing the prefix to filter namespaces.
            suffix: Optional list of strings representing the suffix to filter namespaces.
            max_depth: Optional integer specifying the maximum depth of namespaces to return.
            limit: Maximum number of namespaces to return (default is 100).
            offset: Number of namespaces to skip before returning results (default is 0).
            headers: Optional custom headers to include with the request.
            params: Optional query parameters to include with the request.

        Returns:
            A list of namespaces matching the criteria.

        ???+ example "Example Usage"

            ```python
            client = get_client(url="http://localhost:2024")
            namespaces = await client.store.list_namespaces(
                prefix=["documents"],
                max_depth=3,
                limit=10,
                offset=0
            )
            print(namespaces)
            ----------------------------------------------------------------
            [
                ["documents", "user123", "reports"],
                ["documents", "user456", "invoices"],
                ...
            ]
            ```
        """
        payload = {
            "prefix": prefix,
            "suffix": suffix,
            "max_depth": max_depth,
            "limit": limit,
            "offset": offset,
        }
        # None-valued options are stripped so the server applies its defaults.
        return await self.http.post(
            "/store/namespaces",
            json=_provided_vals(payload),
            headers=headers,
            params=params,
        )
  3178. def get_sync_client(
  3179. *,
  3180. url: str | None = None,
  3181. api_key: str | None = NOT_PROVIDED,
  3182. headers: Mapping[str, str] | None = None,
  3183. timeout: TimeoutTypes | None = None,
  3184. ) -> SyncLangGraphClient:
  3185. """Get a synchronous LangGraphClient instance.
  3186. Args:
  3187. url: The URL of the LangGraph API.
  3188. api_key: API key for authentication. Can be:
  3189. - A string: use this exact API key
  3190. - `None`: explicitly skip loading from environment variables
  3191. - Not provided (default): auto-load from environment in this order:
  3192. 1. `LANGGRAPH_API_KEY`
  3193. 2. `LANGSMITH_API_KEY`
  3194. 3. `LANGCHAIN_API_KEY`
  3195. headers: Optional custom headers
  3196. timeout: Optional timeout configuration for the HTTP client.
  3197. Accepts an httpx.Timeout instance, a float (seconds), or a tuple of timeouts.
  3198. Tuple format is (connect, read, write, pool)
  3199. If not provided, defaults to connect=5s, read=300s, write=300s, and pool=5s.
  3200. Returns:
  3201. SyncLangGraphClient: The top-level synchronous client for accessing AssistantsClient,
  3202. ThreadsClient, RunsClient, and CronClient.
  3203. ???+ example "Example"
  3204. ```python
  3205. from langgraph_sdk import get_sync_client
  3206. # get top-level synchronous LangGraphClient
  3207. client = get_sync_client(url="http://localhost:8123")
  3208. # example usage: client.<model>.<method_name>()
  3209. assistant = client.assistants.get(assistant_id="some_uuid")
  3210. ```
  3211. ???+ example "Skip auto-loading API key from environment:"
  3212. ```python
  3213. from langgraph_sdk import get_sync_client
  3214. # Don't load API key from environment variables
  3215. client = get_sync_client(
  3216. url="http://localhost:8123",
  3217. api_key=None
  3218. )
  3219. ```
  3220. """
  3221. if url is None:
  3222. url = "http://localhost:8123"
  3223. transport = httpx.HTTPTransport(retries=5)
  3224. client = httpx.Client(
  3225. base_url=url,
  3226. transport=transport,
  3227. timeout=(
  3228. httpx.Timeout(timeout) # ty: ignore[invalid-argument-type]
  3229. if timeout is not None
  3230. else httpx.Timeout(connect=5, read=300, write=300, pool=5)
  3231. ),
  3232. headers=_get_headers(api_key, headers),
  3233. )
  3234. return SyncLangGraphClient(client)
  3235. class SyncLangGraphClient:
  3236. """Synchronous client for interacting with the LangGraph API.
  3237. This class provides synchronous access to LangGraph API endpoints for managing
  3238. assistants, threads, runs, cron jobs, and data storage.
  3239. ???+ example "Example"
  3240. ```python
  3241. client = get_sync_client(url="http://localhost:2024")
  3242. assistant = client.assistants.get("asst_123")
  3243. ```
  3244. """
  3245. def __init__(self, client: httpx.Client) -> None:
  3246. self.http = SyncHttpClient(client)
  3247. self.assistants = SyncAssistantsClient(self.http)
  3248. self.threads = SyncThreadsClient(self.http)
  3249. self.runs = SyncRunsClient(self.http)
  3250. self.crons = SyncCronClient(self.http)
  3251. self.store = SyncStoreClient(self.http)
  3252. def __enter__(self) -> SyncLangGraphClient:
  3253. """Enter the sync context manager."""
  3254. return self
  3255. def __exit__(
  3256. self,
  3257. exc_type: type[BaseException] | None,
  3258. exc_val: BaseException | None,
  3259. exc_tb: TracebackType | None,
  3260. ) -> None:
  3261. """Exit the sync context manager."""
  3262. self.close()
  3263. def close(self) -> None:
  3264. """Close the underlying HTTP client."""
  3265. if hasattr(self, "http"):
  3266. self.http.client.close()
class SyncHttpClient:
    """Handle synchronous requests to the LangGraph API.

    Provides error messaging and content handling enhancements above the
    underlying httpx client, mirroring the interface of [HttpClient](#HttpClient)
    but for sync usage.

    Attributes:
        client (httpx.Client): Underlying HTTPX sync client.
    """

    def __init__(self, client: httpx.Client) -> None:
        self.client = client

    def get(
        self,
        path: str,
        *,
        params: QueryParamTypes | None = None,
        headers: Mapping[str, str] | None = None,
        on_response: Callable[[httpx.Response], None] | None = None,
    ) -> Any:
        """Send a `GET` request.

        Args:
            path: Request path, relative to the client's base URL.
            params: Optional query parameters.
            headers: Optional extra headers.
            on_response: Optional callback invoked with the raw response
                before status checking.

        Returns:
            The decoded JSON body, or None for an empty body.
        """
        r = self.client.get(path, params=params, headers=headers)
        if on_response:
            on_response(r)
        _raise_for_status_typed(r)
        return _decode_json(r)

    def post(
        self,
        path: str,
        *,
        json: dict[str, Any] | list | None,
        params: QueryParamTypes | None = None,
        headers: Mapping[str, str] | None = None,
        on_response: Callable[[httpx.Response], None] | None = None,
    ) -> Any:
        """Send a `POST` request.

        Args:
            path: Request path, relative to the client's base URL.
            json: JSON-serializable body; None sends an empty body.
            params: Optional query parameters.
            headers: Optional extra headers, merged over the encoded ones.
            on_response: Optional callback invoked with the raw response
                before status checking.

        Returns:
            The decoded JSON body, or None for an empty body.
        """
        if json is not None:
            request_headers, content = _encode_json(json)
        else:
            # Explicit empty body; no content headers needed.
            request_headers, content = {}, b""
        if headers:
            request_headers.update(headers)
        r = self.client.post(
            path, headers=request_headers, content=content, params=params
        )
        if on_response:
            on_response(r)
        _raise_for_status_typed(r)
        return _decode_json(r)

    def put(
        self,
        path: str,
        *,
        json: dict,
        params: QueryParamTypes | None = None,
        headers: Mapping[str, str] | None = None,
        on_response: Callable[[httpx.Response], None] | None = None,
    ) -> Any:
        """Send a `PUT` request.

        Args:
            path: Request path, relative to the client's base URL.
            json: JSON-serializable body (required).
            params: Optional query parameters.
            headers: Optional extra headers, merged over the encoded ones.
            on_response: Optional callback invoked with the raw response
                before status checking.

        Returns:
            The decoded JSON body, or None for an empty body.
        """
        request_headers, content = _encode_json(json)
        if headers:
            request_headers.update(headers)
        r = self.client.put(
            path, headers=request_headers, content=content, params=params
        )
        if on_response:
            on_response(r)
        _raise_for_status_typed(r)
        return _decode_json(r)

    def patch(
        self,
        path: str,
        *,
        json: dict,
        params: QueryParamTypes | None = None,
        headers: Mapping[str, str] | None = None,
        on_response: Callable[[httpx.Response], None] | None = None,
    ) -> Any:
        """Send a `PATCH` request.

        Args:
            path: Request path, relative to the client's base URL.
            json: JSON-serializable body (required).
            params: Optional query parameters.
            headers: Optional extra headers, merged over the encoded ones.
            on_response: Optional callback invoked with the raw response
                before status checking.

        Returns:
            The decoded JSON body, or None for an empty body.
        """
        request_headers, content = _encode_json(json)
        if headers:
            request_headers.update(headers)
        r = self.client.patch(
            path, headers=request_headers, content=content, params=params
        )
        if on_response:
            on_response(r)
        _raise_for_status_typed(r)
        return _decode_json(r)

    def delete(
        self,
        path: str,
        *,
        json: Any | None = None,
        params: QueryParamTypes | None = None,
        headers: Mapping[str, str] | None = None,
        on_response: Callable[[httpx.Response], None] | None = None,
    ) -> None:
        """Send a `DELETE` request.

        Args:
            path: Request path, relative to the client's base URL.
            json: Optional JSON-serializable body.
            params: Optional query parameters.
            headers: Optional extra headers.
            on_response: Optional callback invoked with the raw response
                before status checking.
        """
        # httpx.Client.delete() does not accept a body, so use request().
        r = self.client.request(
            "DELETE", path, json=json, params=params, headers=headers
        )
        if on_response:
            on_response(r)
        _raise_for_status_typed(r)

    def request_reconnect(
        self,
        path: str,
        method: str,
        *,
        json: dict[str, Any] | None = None,
        params: QueryParamTypes | None = None,
        headers: Mapping[str, str] | None = None,
        on_response: Callable[[httpx.Response], None] | None = None,
        reconnect_limit: int = 5,
    ) -> Any:
        """Send a request that automatically reconnects to Location header.

        If the response carries a Location header and reading the body fails
        with an HTTP error, the request is retried with a GET to that
        Location, up to `reconnect_limit` times.
        """
        request_headers, content = _encode_json(json)
        if headers:
            request_headers.update(headers)
        with self.client.stream(
            method, path, headers=request_headers, content=content, params=params
        ) as r:
            if on_response:
                on_response(r)
            try:
                r.raise_for_status()
            except httpx.HTTPStatusError as e:
                # Attach the response body to the error for easier debugging;
                # add_note is only available on Python 3.11+.
                body = r.read().decode()
                if sys.version_info >= (3, 11):
                    e.add_note(body)
                else:
                    logger.error(f"Error from langgraph-api: {body}", exc_info=e)
                raise e
            loc = r.headers.get("location")
            if reconnect_limit <= 0 or not loc:
                # No reconnection possible (budget exhausted or no Location):
                # decode directly and let any read error propagate.
                return _decode_json(r)
            try:
                return _decode_json(r)
            except httpx.HTTPError:
                # Body read failed mid-stream; follow the Location header
                # with a fresh GET and a decremented budget.
                warnings.warn(
                    f"Request failed, attempting reconnect to Location: {loc}",
                    stacklevel=2,
                )
                r.close()
                return self.request_reconnect(
                    loc,
                    "GET",
                    headers=request_headers,
                    # don't pass on_response so it's only called once
                    reconnect_limit=reconnect_limit - 1,
                )

    def stream(
        self,
        path: str,
        method: str,
        *,
        json: dict[str, Any] | None = None,
        params: QueryParamTypes | None = None,
        headers: Mapping[str, str] | None = None,
        on_response: Callable[[httpx.Response], None] | None = None,
    ) -> Iterator[StreamPart]:
        """Stream the results of a request using SSE.

        Yields decoded `StreamPart` events. If the server supplies a Location
        header, transient disconnects are retried against that URL with a GET,
        replaying from the last received event via the Last-Event-ID header,
        up to 5 reconnection attempts.
        """
        if json is not None:
            request_headers, content = _encode_json(json)
        else:
            request_headers, content = {}, None
        request_headers["Accept"] = "text/event-stream"
        request_headers["Cache-Control"] = "no-store"
        if headers:
            request_headers.update(headers)
        # Reconnect requests are GETs with no body, so body-specific headers
        # must not be re-sent.
        reconnect_headers = {
            key: value
            for key, value in request_headers.items()
            if key.lower() not in {"content-length", "content-type"}
        }
        last_event_id: str | None = None
        reconnect_path: str | None = None
        reconnect_attempts = 0
        max_reconnect_attempts = 5
        while True:
            # First iteration uses the original method/body/params; any
            # reconnection switches to a bare GET against reconnect_path.
            current_headers = dict(
                request_headers if reconnect_path is None else reconnect_headers
            )
            if last_event_id is not None:
                # Ask the server to resume from the last event we saw.
                current_headers["Last-Event-ID"] = last_event_id
            current_method = method if reconnect_path is None else "GET"
            current_content = content if reconnect_path is None else None
            current_params = params if reconnect_path is None else None
            retry = False
            with self.client.stream(
                current_method,
                reconnect_path or path,
                headers=current_headers,
                content=current_content,
                params=current_params,
            ) as res:
                if reconnect_path is None and on_response:
                    on_response(res)
                # check status
                _raise_for_status_typed(res)
                # check content type
                content_type = res.headers.get("content-type", "").partition(";")[0]
                if "text/event-stream" not in content_type:
                    raise httpx.TransportError(
                        "Expected response header Content-Type to contain 'text/event-stream', "
                        f"got {content_type!r}"
                    )
                # A Location header marks the stream as resumable.
                reconnect_location = res.headers.get("location")
                if reconnect_location:
                    reconnect_path = reconnect_location
                decoder = SSEDecoder()
                try:
                    for line in iter_lines_raw(res):
                        sse = decoder.decode(cast(bytes, line).rstrip(b"\n"))
                        if sse is not None:
                            if decoder.last_event_id is not None:
                                last_event_id = decoder.last_event_id
                            if sse.event or sse.data is not None:
                                yield sse
                except httpx.HTTPError:
                    # httpx.TransportError inherits from HTTPError, so transient
                    # disconnects during streaming land here.
                    if reconnect_path is None:
                        raise
                    retry = True
                else:
                    # Flush any event still buffered in the decoder.
                    if sse := decoder.decode(b""):
                        if decoder.last_event_id is not None:
                            last_event_id = decoder.last_event_id
                        if sse.event or sse.data is not None:
                            # See async stream implementation for rationale on
                            # skipping empty flush events.
                            yield sse
            if retry:
                reconnect_attempts += 1
                if reconnect_attempts > max_reconnect_attempts:
                    raise httpx.TransportError(
                        "Exceeded maximum SSE reconnection attempts"
                    )
                continue
            break
  3507. def _encode_json(json: Any) -> tuple[dict[str, str], bytes]:
  3508. body = orjson.dumps(
  3509. json,
  3510. _orjson_default,
  3511. orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NON_STR_KEYS,
  3512. )
  3513. content_length = str(len(body))
  3514. content_type = "application/json"
  3515. headers = {"Content-Length": content_length, "Content-Type": content_type}
  3516. return headers, body
  3517. def _decode_json(r: httpx.Response) -> Any:
  3518. body = r.read()
  3519. return orjson.loads(body) if body else None
class SyncAssistantsClient:
    """Client for managing assistants in LangGraph synchronously.

    This class provides methods to interact with assistants, which are versioned configurations of your graph.

    ???+ example "Example"

        ```python
        client = get_sync_client(url="http://localhost:2024")
        assistant = client.assistants.get("assistant_id_123")
        ```
    """

    def __init__(self, http: SyncHttpClient) -> None:
        # Shared synchronous HTTP transport owned by SyncLangGraphClient.
        self.http = http
  3531. def get(
  3532. self,
  3533. assistant_id: str,
  3534. *,
  3535. headers: Mapping[str, str] | None = None,
  3536. params: QueryParamTypes | None = None,
  3537. ) -> Assistant:
  3538. """Get an assistant by ID.
  3539. Args:
  3540. assistant_id: The ID of the assistant to get OR the name of the graph (to use the default assistant).
  3541. headers: Optional custom headers to include with the request.
  3542. params: Optional query parameters to include with the request.
  3543. Returns:
  3544. `Assistant` Object.
  3545. ???+ example "Example Usage"
  3546. ```python
  3547. assistant = client.assistants.get(
  3548. assistant_id="my_assistant_id"
  3549. )
  3550. print(assistant)
  3551. ```
  3552. ```shell
  3553. ----------------------------------------------------
  3554. {
  3555. 'assistant_id': 'my_assistant_id',
  3556. 'graph_id': 'agent',
  3557. 'created_at': '2024-06-25T17:10:33.109781+00:00',
  3558. 'updated_at': '2024-06-25T17:10:33.109781+00:00',
  3559. 'config': {},
  3560. 'context': {},
  3561. 'metadata': {'created_by': 'system'}
  3562. }
  3563. ```
  3564. """
  3565. return self.http.get(
  3566. f"/assistants/{assistant_id}", headers=headers, params=params
  3567. )
  3568. def get_graph(
  3569. self,
  3570. assistant_id: str,
  3571. *,
  3572. xray: int | bool = False,
  3573. headers: Mapping[str, str] | None = None,
  3574. params: QueryParamTypes | None = None,
  3575. ) -> dict[str, list[dict[str, Any]]]:
  3576. """Get the graph of an assistant by ID.
  3577. Args:
  3578. assistant_id: The ID of the assistant to get the graph of.
  3579. xray: Include graph representation of subgraphs. If an integer value is provided, only subgraphs with a depth less than or equal to the value will be included.
  3580. headers: Optional custom headers to include with the request.
  3581. params: Optional query parameters to include with the request.
  3582. Returns:
  3583. The graph information for the assistant in JSON format.
  3584. ???+ example "Example Usage"
  3585. ```python
  3586. client = get_sync_client(url="http://localhost:2024")
  3587. graph_info = client.assistants.get_graph(
  3588. assistant_id="my_assistant_id"
  3589. )
  3590. print(graph_info)
  3591. --------------------------------------------------------------------------------------------------------------------------
  3592. {
  3593. 'nodes':
  3594. [
  3595. {'id': '__start__', 'type': 'schema', 'data': '__start__'},
  3596. {'id': '__end__', 'type': 'schema', 'data': '__end__'},
  3597. {'id': 'agent','type': 'runnable','data': {'id': ['langgraph', 'utils', 'RunnableCallable'],'name': 'agent'}},
  3598. ],
  3599. 'edges':
  3600. [
  3601. {'source': '__start__', 'target': 'agent'},
  3602. {'source': 'agent','target': '__end__'}
  3603. ]
  3604. }
  3605. ```
  3606. """
  3607. query_params = {"xray": xray}
  3608. if params:
  3609. query_params.update(params)
  3610. return self.http.get(
  3611. f"/assistants/{assistant_id}/graph", params=query_params, headers=headers
  3612. )
  3613. def get_schemas(
  3614. self,
  3615. assistant_id: str,
  3616. *,
  3617. headers: Mapping[str, str] | None = None,
  3618. params: QueryParamTypes | None = None,
  3619. ) -> GraphSchema:
  3620. """Get the schemas of an assistant by ID.
  3621. Args:
  3622. assistant_id: The ID of the assistant to get the schema of.
  3623. headers: Optional custom headers to include with the request.
  3624. params: Optional query parameters to include with the request.
  3625. Returns:
  3626. GraphSchema: The graph schema for the assistant.
  3627. ???+ example "Example Usage"
  3628. ```python
  3629. client = get_sync_client(url="http://localhost:2024")
  3630. schema = client.assistants.get_schemas(
  3631. assistant_id="my_assistant_id"
  3632. )
  3633. print(schema)
  3634. ```
  3635. ```shell
  3636. ----------------------------------------------------------------------------------------------------------------------------
  3637. {
  3638. 'graph_id': 'agent',
  3639. 'state_schema':
  3640. {
  3641. 'title': 'LangGraphInput',
  3642. '$ref': '#/definitions/AgentState',
  3643. 'definitions':
  3644. {
  3645. 'BaseMessage':
  3646. {
  3647. 'title': 'BaseMessage',
  3648. 'description': 'Base abstract Message class. Messages are the inputs and outputs of ChatModels.',
  3649. 'type': 'object',
  3650. 'properties':
  3651. {
  3652. 'content':
  3653. {
  3654. 'title': 'Content',
  3655. 'anyOf': [
  3656. {'type': 'string'},
  3657. {'type': 'array','items': {'anyOf': [{'type': 'string'}, {'type': 'object'}]}}
  3658. ]
  3659. },
  3660. 'additional_kwargs':
  3661. {
  3662. 'title': 'Additional Kwargs',
  3663. 'type': 'object'
  3664. },
  3665. 'response_metadata':
  3666. {
  3667. 'title': 'Response Metadata',
  3668. 'type': 'object'
  3669. },
  3670. 'type':
  3671. {
  3672. 'title': 'Type',
  3673. 'type': 'string'
  3674. },
  3675. 'name':
  3676. {
  3677. 'title': 'Name',
  3678. 'type': 'string'
  3679. },
  3680. 'id':
  3681. {
  3682. 'title': 'Id',
  3683. 'type': 'string'
  3684. }
  3685. },
  3686. 'required': ['content', 'type']
  3687. },
  3688. 'AgentState':
  3689. {
  3690. 'title': 'AgentState',
  3691. 'type': 'object',
  3692. 'properties':
  3693. {
  3694. 'messages':
  3695. {
  3696. 'title': 'Messages',
  3697. 'type': 'array',
  3698. 'items': {'$ref': '#/definitions/BaseMessage'}
  3699. }
  3700. },
  3701. 'required': ['messages']
  3702. }
  3703. }
  3704. },
  3705. 'config_schema':
  3706. {
  3707. 'title': 'Configurable',
  3708. 'type': 'object',
  3709. 'properties':
  3710. {
  3711. 'model_name':
  3712. {
  3713. 'title': 'Model Name',
  3714. 'enum': ['anthropic', 'openai'],
  3715. 'type': 'string'
  3716. }
  3717. }
  3718. },
  3719. 'context_schema':
  3720. {
  3721. 'title': 'Context',
  3722. 'type': 'object',
  3723. 'properties':
  3724. {
  3725. 'model_name':
  3726. {
  3727. 'title': 'Model Name',
  3728. 'enum': ['anthropic', 'openai'],
  3729. 'type': 'string'
  3730. }
  3731. }
  3732. }
  3733. }
  3734. ```
  3735. """
  3736. return self.http.get(
  3737. f"/assistants/{assistant_id}/schemas", headers=headers, params=params
  3738. )
  3739. def get_subgraphs(
  3740. self,
  3741. assistant_id: str,
  3742. namespace: str | None = None,
  3743. recurse: bool = False,
  3744. *,
  3745. headers: Mapping[str, str] | None = None,
  3746. params: QueryParamTypes | None = None,
  3747. ) -> Subgraphs:
  3748. """Get the schemas of an assistant by ID.
  3749. Args:
  3750. assistant_id: The ID of the assistant to get the schema of.
  3751. headers: Optional custom headers to include with the request.
  3752. params: Optional query parameters to include with the request.
  3753. Returns:
  3754. Subgraphs: The graph schema for the assistant.
  3755. """
  3756. get_params = {"recurse": recurse}
  3757. if params:
  3758. get_params = {**get_params, **params}
  3759. if namespace is not None:
  3760. return self.http.get(
  3761. f"/assistants/{assistant_id}/subgraphs/{namespace}",
  3762. params=get_params,
  3763. headers=headers,
  3764. )
  3765. else:
  3766. return self.http.get(
  3767. f"/assistants/{assistant_id}/subgraphs",
  3768. params=get_params,
  3769. headers=headers,
  3770. )
  3771. def create(
  3772. self,
  3773. graph_id: str | None,
  3774. config: Config | None = None,
  3775. *,
  3776. context: Context | None = None,
  3777. metadata: Json = None,
  3778. assistant_id: str | None = None,
  3779. if_exists: OnConflictBehavior | None = None,
  3780. name: str | None = None,
  3781. headers: Mapping[str, str] | None = None,
  3782. description: str | None = None,
  3783. params: QueryParamTypes | None = None,
  3784. ) -> Assistant:
  3785. """Create a new assistant.
  3786. Useful when graph is configurable and you want to create different assistants based on different configurations.
  3787. Args:
  3788. graph_id: The ID of the graph the assistant should use. The graph ID is normally set in your langgraph.json configuration.
  3789. config: Configuration to use for the graph.
  3790. context: Static context to add to the assistant.
  3791. !!! version-added "Added in version 0.6.0"
  3792. metadata: Metadata to add to assistant.
  3793. assistant_id: Assistant ID to use, will default to a random UUID if not provided.
  3794. if_exists: How to handle duplicate creation. Defaults to 'raise' under the hood.
  3795. Must be either 'raise' (raise error if duplicate), or 'do_nothing' (return existing assistant).
  3796. name: The name of the assistant. Defaults to 'Untitled' under the hood.
  3797. headers: Optional custom headers to include with the request.
  3798. description: Optional description of the assistant.
  3799. The description field is available for langgraph-api server version>=0.0.45
  3800. params: Optional query parameters to include with the request.
  3801. Returns:
  3802. The created assistant.
  3803. ???+ example "Example Usage"
  3804. ```python
  3805. client = get_sync_client(url="http://localhost:2024")
  3806. assistant = client.assistants.create(
  3807. graph_id="agent",
  3808. context={"model_name": "openai"},
  3809. metadata={"number":1},
  3810. assistant_id="my-assistant-id",
  3811. if_exists="do_nothing",
  3812. name="my_name"
  3813. )
  3814. ```
  3815. """
  3816. payload: dict[str, Any] = {
  3817. "graph_id": graph_id,
  3818. }
  3819. if config:
  3820. payload["config"] = config
  3821. if context:
  3822. payload["context"] = context
  3823. if metadata:
  3824. payload["metadata"] = metadata
  3825. if assistant_id:
  3826. payload["assistant_id"] = assistant_id
  3827. if if_exists:
  3828. payload["if_exists"] = if_exists
  3829. if name:
  3830. payload["name"] = name
  3831. if description:
  3832. payload["description"] = description
  3833. return self.http.post(
  3834. "/assistants", json=payload, headers=headers, params=params
  3835. )
  3836. def update(
  3837. self,
  3838. assistant_id: str,
  3839. *,
  3840. graph_id: str | None = None,
  3841. config: Config | None = None,
  3842. context: Context | None = None,
  3843. metadata: Json = None,
  3844. name: str | None = None,
  3845. headers: Mapping[str, str] | None = None,
  3846. description: str | None = None,
  3847. params: QueryParamTypes | None = None,
  3848. ) -> Assistant:
  3849. """Update an assistant.
  3850. Use this to point to a different graph, update the configuration, or change the metadata of an assistant.
  3851. Args:
  3852. assistant_id: Assistant to update.
  3853. graph_id: The ID of the graph the assistant should use.
  3854. The graph ID is normally set in your langgraph.json configuration. If `None`, assistant will keep pointing to same graph.
  3855. config: Configuration to use for the graph.
  3856. context: Static context to add to the assistant.
  3857. !!! version-added "Added in version 0.6.0"
  3858. metadata: Metadata to merge with existing assistant metadata.
  3859. name: The new name for the assistant.
  3860. headers: Optional custom headers to include with the request.
  3861. description: Optional description of the assistant.
  3862. The description field is available for langgraph-api server version>=0.0.45
  3863. Returns:
  3864. The updated assistant.
  3865. ???+ example "Example Usage"
  3866. ```python
  3867. client = get_sync_client(url="http://localhost:2024")
  3868. assistant = client.assistants.update(
  3869. assistant_id='e280dad7-8618-443f-87f1-8e41841c180f',
  3870. graph_id="other-graph",
  3871. context={"model_name": "anthropic"},
  3872. metadata={"number":2}
  3873. )
  3874. ```
  3875. """
  3876. payload: dict[str, Any] = {}
  3877. if graph_id:
  3878. payload["graph_id"] = graph_id
  3879. if config:
  3880. payload["config"] = config
  3881. if context:
  3882. payload["context"] = context
  3883. if metadata:
  3884. payload["metadata"] = metadata
  3885. if name:
  3886. payload["name"] = name
  3887. if description:
  3888. payload["description"] = description
  3889. return self.http.patch(
  3890. f"/assistants/{assistant_id}",
  3891. json=payload,
  3892. headers=headers,
  3893. params=params,
  3894. )
  3895. def delete(
  3896. self,
  3897. assistant_id: str,
  3898. *,
  3899. headers: Mapping[str, str] | None = None,
  3900. params: QueryParamTypes | None = None,
  3901. ) -> None:
  3902. """Delete an assistant.
  3903. Args:
  3904. assistant_id: The assistant ID to delete.
  3905. headers: Optional custom headers to include with the request.
  3906. params: Optional query parameters to include with the request.
  3907. Returns:
  3908. `None`
  3909. ???+ example "Example Usage"
  3910. ```python
  3911. client = get_sync_client(url="http://localhost:2024")
  3912. client.assistants.delete(
  3913. assistant_id="my_assistant_id"
  3914. )
  3915. ```
  3916. """
  3917. self.http.delete(f"/assistants/{assistant_id}", headers=headers, params=params)
  3918. @overload
  3919. def search(
  3920. self,
  3921. *,
  3922. metadata: Json = None,
  3923. graph_id: str | None = None,
  3924. name: str | None = None,
  3925. limit: int = 10,
  3926. offset: int = 0,
  3927. sort_by: AssistantSortBy | None = None,
  3928. sort_order: SortOrder | None = None,
  3929. select: list[AssistantSelectField] | None = None,
  3930. response_format: Literal["object"],
  3931. headers: Mapping[str, str] | None = None,
  3932. params: QueryParamTypes | None = None,
  3933. ) -> AssistantsSearchResponse: ...
  3934. @overload
  3935. def search(
  3936. self,
  3937. *,
  3938. metadata: Json = None,
  3939. graph_id: str | None = None,
  3940. name: str | None = None,
  3941. limit: int = 10,
  3942. offset: int = 0,
  3943. sort_by: AssistantSortBy | None = None,
  3944. sort_order: SortOrder | None = None,
  3945. select: list[AssistantSelectField] | None = None,
  3946. response_format: Literal["array"] = "array",
  3947. headers: Mapping[str, str] | None = None,
  3948. params: QueryParamTypes | None = None,
  3949. ) -> list[Assistant]: ...
  3950. def search(
  3951. self,
  3952. *,
  3953. metadata: Json = None,
  3954. graph_id: str | None = None,
  3955. name: str | None = None,
  3956. limit: int = 10,
  3957. offset: int = 0,
  3958. sort_by: AssistantSortBy | None = None,
  3959. sort_order: SortOrder | None = None,
  3960. select: list[AssistantSelectField] | None = None,
  3961. response_format: Literal["array", "object"] = "array",
  3962. headers: Mapping[str, str] | None = None,
  3963. params: QueryParamTypes | None = None,
  3964. ) -> AssistantsSearchResponse | list[Assistant]:
  3965. """Search for assistants.
  3966. Args:
  3967. metadata: Metadata to filter by. Exact match filter for each KV pair.
  3968. graph_id: The ID of the graph to filter by.
  3969. The graph ID is normally set in your langgraph.json configuration.
  3970. name: The name of the assistant to filter by.
  3971. The filtering logic will match assistants where 'name' is a substring (case insensitive) of the assistant name.
  3972. limit: The maximum number of results to return.
  3973. offset: The number of results to skip.
  3974. sort_by: The field to sort by.
  3975. sort_order: The order to sort by.
  3976. select: Specific assistant fields to include in the response.
  3977. response_format: Controls the response shape. Use ``"array"`` (default)
  3978. to return a bare list of assistants, or ``"object"`` to return
  3979. a mapping containing assistants plus pagination metadata.
  3980. Defaults to "array", though this default will be changed to "object" in a future release.
  3981. headers: Optional custom headers to include with the request.
  3982. Returns:
  3983. A list of assistants (when ``response_format=\"array\"``) or a mapping
  3984. with the assistants and the next pagination cursor (when
  3985. ``response_format=\"object\"``).
  3986. ???+ example "Example Usage"
  3987. ```python
  3988. client = get_sync_client(url="http://localhost:2024")
  3989. response = client.assistants.search(
  3990. metadata = {"name":"my_name"},
  3991. graph_id="my_graph_id",
  3992. limit=5,
  3993. offset=5,
  3994. response_format="object",
  3995. )
  3996. assistants = response["assistants"]
  3997. next_cursor = response["next"]
  3998. ```
  3999. """
  4000. if response_format not in ("array", "object"):
  4001. raise ValueError("response_format must be 'array' or 'object'")
  4002. payload: dict[str, Any] = {
  4003. "limit": limit,
  4004. "offset": offset,
  4005. }
  4006. if metadata:
  4007. payload["metadata"] = metadata
  4008. if graph_id:
  4009. payload["graph_id"] = graph_id
  4010. if name:
  4011. payload["name"] = name
  4012. if sort_by:
  4013. payload["sort_by"] = sort_by
  4014. if sort_order:
  4015. payload["sort_order"] = sort_order
  4016. if select:
  4017. payload["select"] = select
  4018. next_cursor: str | None = None
  4019. def capture_pagination(response: httpx.Response) -> None:
  4020. nonlocal next_cursor
  4021. next_cursor = response.headers.get("X-Pagination-Next")
  4022. assistants = cast(
  4023. list[Assistant],
  4024. self.http.post(
  4025. "/assistants/search",
  4026. json=payload,
  4027. headers=headers,
  4028. params=params,
  4029. on_response=capture_pagination if response_format == "object" else None,
  4030. ),
  4031. )
  4032. if response_format == "object":
  4033. return {"assistants": assistants, "next": next_cursor}
  4034. return assistants
  4035. def count(
  4036. self,
  4037. *,
  4038. metadata: Json = None,
  4039. graph_id: str | None = None,
  4040. name: str | None = None,
  4041. headers: Mapping[str, str] | None = None,
  4042. params: QueryParamTypes | None = None,
  4043. ) -> int:
  4044. """Count assistants matching filters.
  4045. Args:
  4046. metadata: Metadata to filter by. Exact match for each key/value.
  4047. graph_id: Optional graph id to filter by.
  4048. name: Optional name to filter by.
  4049. The filtering logic will match assistants where 'name' is a substring (case insensitive) of the assistant name.
  4050. headers: Optional custom headers to include with the request.
  4051. params: Optional query parameters to include with the request.
  4052. Returns:
  4053. int: Number of assistants matching the criteria.
  4054. """
  4055. payload: dict[str, Any] = {}
  4056. if metadata:
  4057. payload["metadata"] = metadata
  4058. if graph_id:
  4059. payload["graph_id"] = graph_id
  4060. if name:
  4061. payload["name"] = name
  4062. return self.http.post(
  4063. "/assistants/count", json=payload, headers=headers, params=params
  4064. )
  4065. def get_versions(
  4066. self,
  4067. assistant_id: str,
  4068. metadata: Json = None,
  4069. limit: int = 10,
  4070. offset: int = 0,
  4071. *,
  4072. headers: Mapping[str, str] | None = None,
  4073. params: QueryParamTypes | None = None,
  4074. ) -> list[AssistantVersion]:
  4075. """List all versions of an assistant.
  4076. Args:
  4077. assistant_id: The assistant ID to get versions for.
  4078. metadata: Metadata to filter versions by. Exact match filter for each KV pair.
  4079. limit: The maximum number of versions to return.
  4080. offset: The number of versions to skip.
  4081. headers: Optional custom headers to include with the request.
  4082. Returns:
  4083. A list of assistants.
  4084. ???+ example "Example Usage"
  4085. ```python
  4086. client = get_sync_client(url="http://localhost:2024")
  4087. assistant_versions = client.assistants.get_versions(
  4088. assistant_id="my_assistant_id"
  4089. )
  4090. ```
  4091. """
  4092. payload: dict[str, Any] = {
  4093. "limit": limit,
  4094. "offset": offset,
  4095. }
  4096. if metadata:
  4097. payload["metadata"] = metadata
  4098. return self.http.post(
  4099. f"/assistants/{assistant_id}/versions",
  4100. json=payload,
  4101. headers=headers,
  4102. params=params,
  4103. )
  4104. def set_latest(
  4105. self,
  4106. assistant_id: str,
  4107. version: int,
  4108. *,
  4109. headers: Mapping[str, str] | None = None,
  4110. params: QueryParamTypes | None = None,
  4111. ) -> Assistant:
  4112. """Change the version of an assistant.
  4113. Args:
  4114. assistant_id: The assistant ID to delete.
  4115. version: The version to change to.
  4116. headers: Optional custom headers to include with the request.
  4117. Returns:
  4118. `Assistant` Object.
  4119. ???+ example "Example Usage"
  4120. ```python
  4121. client = get_sync_client(url="http://localhost:2024")
  4122. new_version_assistant = client.assistants.set_latest(
  4123. assistant_id="my_assistant_id",
  4124. version=3
  4125. )
  4126. ```
  4127. """
  4128. payload: dict[str, Any] = {"version": version}
  4129. return self.http.post(
  4130. f"/assistants/{assistant_id}/latest",
  4131. json=payload,
  4132. headers=headers,
  4133. params=params,
  4134. )
  4135. class SyncThreadsClient:
  4136. """Synchronous client for managing threads in LangGraph.
  4137. This class provides methods to create, retrieve, and manage threads,
  4138. which represent conversations or stateful interactions.
  4139. ???+ example "Example"
  4140. ```python
  4141. client = get_sync_client(url="http://localhost:2024")
  4142. thread = client.threads.create(metadata={"user_id": "123"})
  4143. ```
  4144. """
    def __init__(self, http: SyncHttpClient) -> None:
        # Shared synchronous HTTP transport used for all thread endpoints.
        self.http = http
  4147. def get(
  4148. self,
  4149. thread_id: str,
  4150. *,
  4151. headers: Mapping[str, str] | None = None,
  4152. params: QueryParamTypes | None = None,
  4153. ) -> Thread:
  4154. """Get a thread by ID.
  4155. Args:
  4156. thread_id: The ID of the thread to get.
  4157. headers: Optional custom headers to include with the request.
  4158. Returns:
  4159. `Thread` object.
  4160. ???+ example "Example Usage"
  4161. ```python
  4162. client = get_sync_client(url="http://localhost:2024")
  4163. thread = client.threads.get(
  4164. thread_id="my_thread_id"
  4165. )
  4166. print(thread)
  4167. ```
  4168. ```shell
  4169. -----------------------------------------------------
  4170. {
  4171. 'thread_id': 'my_thread_id',
  4172. 'created_at': '2024-07-18T18:35:15.540834+00:00',
  4173. 'updated_at': '2024-07-18T18:35:15.540834+00:00',
  4174. 'metadata': {'graph_id': 'agent'}
  4175. }
  4176. ```
  4177. """
  4178. return self.http.get(f"/threads/{thread_id}", headers=headers, params=params)
    def create(
        self,
        *,
        metadata: Json = None,
        thread_id: str | None = None,
        if_exists: OnConflictBehavior | None = None,
        supersteps: Sequence[dict[str, Sequence[dict[str, Any]]]] | None = None,
        graph_id: str | None = None,
        ttl: int | Mapping[str, Any] | None = None,
        headers: Mapping[str, str] | None = None,
        params: QueryParamTypes | None = None,
    ) -> Thread:
        """Create a new thread.

        Args:
            metadata: Metadata to add to thread.
            thread_id: ID of thread.
                If `None`, ID will be a randomly generated UUID.
            if_exists: How to handle duplicate creation. Defaults to 'raise' under the hood.
                Must be either 'raise' (raise error if duplicate), or 'do_nothing' (return existing thread).
            supersteps: Apply a list of supersteps when creating a thread, each containing a sequence of updates.
                Each update has `values` or `command` and `as_node`. Used for copying a thread between deployments.
            graph_id: Optional graph ID to associate with the thread.
            ttl: Optional time-to-live in minutes for the thread. You can pass an
                integer (minutes) or a mapping with keys `ttl` and optional
                `strategy` (defaults to "delete").
            headers: Optional custom headers to include with the request.
            params: Optional query parameters to include with the request.

        Returns:
            The created `Thread`.

        ???+ example "Example Usage"

            ```python
            client = get_sync_client(url="http://localhost:2024")
            thread = client.threads.create(
                metadata={"number":1},
                thread_id="my-thread-id",
                if_exists="raise"
            )
            ```
        """
        payload: dict[str, Any] = {}
        if thread_id:
            payload["thread_id"] = thread_id
        # graph_id is folded into the metadata mapping rather than sent as a
        # top-level field.
        if metadata or graph_id:
            payload["metadata"] = {
                **(metadata or {}),
                **({"graph_id": graph_id} if graph_id else {}),
            }
        if if_exists:
            payload["if_exists"] = if_exists
        if supersteps:
            # NOTE(review): `u["values"]` and `u["as_node"]` are required keys
            # here even though the docstring says an update has `values` OR
            # `command` — a command-only update would raise KeyError; confirm
            # the intended contract before relying on command-only supersteps.
            payload["supersteps"] = [
                {
                    "updates": [
                        {
                            "values": u["values"],
                            "command": u.get("command"),
                            "as_node": u["as_node"],
                        }
                        for u in s["updates"]
                    ]
                }
                for s in supersteps
            ]
        if ttl is not None:
            # Bare numbers are normalized to the mapping form with the
            # server-side default strategy.
            if isinstance(ttl, (int, float)):
                payload["ttl"] = {"ttl": ttl, "strategy": "delete"}
            else:
                payload["ttl"] = ttl
        return self.http.post("/threads", json=payload, headers=headers, params=params)
  4248. def update(
  4249. self,
  4250. thread_id: str,
  4251. *,
  4252. metadata: Mapping[str, Any],
  4253. ttl: int | Mapping[str, Any] | None = None,
  4254. headers: Mapping[str, str] | None = None,
  4255. params: QueryParamTypes | None = None,
  4256. ) -> Thread:
  4257. """Update a thread.
  4258. Args:
  4259. thread_id: ID of thread to update.
  4260. metadata: Metadata to merge with existing thread metadata.
  4261. ttl: Optional time-to-live in minutes for the thread. You can pass an
  4262. integer (minutes) or a mapping with keys `ttl` and optional
  4263. `strategy` (defaults to "delete").
  4264. headers: Optional custom headers to include with the request.
  4265. params: Optional query parameters to include with the request.
  4266. Returns:
  4267. The created `Thread`.
  4268. ???+ example "Example Usage"
  4269. ```python
  4270. client = get_sync_client(url="http://localhost:2024")
  4271. thread = client.threads.update(
  4272. thread_id="my-thread-id",
  4273. metadata={"number":1},
  4274. ttl=43_200,
  4275. )
  4276. ```
  4277. """
  4278. payload: dict[str, Any] = {"metadata": metadata}
  4279. if ttl is not None:
  4280. if isinstance(ttl, (int, float)):
  4281. payload["ttl"] = {"ttl": ttl, "strategy": "delete"}
  4282. else:
  4283. payload["ttl"] = ttl
  4284. return self.http.patch(
  4285. f"/threads/{thread_id}",
  4286. json=payload,
  4287. headers=headers,
  4288. params=params,
  4289. )
  4290. def delete(
  4291. self,
  4292. thread_id: str,
  4293. *,
  4294. headers: Mapping[str, str] | None = None,
  4295. params: QueryParamTypes | None = None,
  4296. ) -> None:
  4297. """Delete a thread.
  4298. Args:
  4299. thread_id: The ID of the thread to delete.
  4300. headers: Optional custom headers to include with the request.
  4301. params: Optional query parameters to include with the request.
  4302. Returns:
  4303. `None`
  4304. ???+ example "Example Usage"
  4305. ```python
  4306. client.threads.delete(
  4307. thread_id="my_thread_id"
  4308. )
  4309. ```
  4310. """
  4311. self.http.delete(f"/threads/{thread_id}", headers=headers, params=params)
  4312. def search(
  4313. self,
  4314. *,
  4315. metadata: Json = None,
  4316. values: Json = None,
  4317. ids: Sequence[str] | None = None,
  4318. status: ThreadStatus | None = None,
  4319. limit: int = 10,
  4320. offset: int = 0,
  4321. sort_by: ThreadSortBy | None = None,
  4322. sort_order: SortOrder | None = None,
  4323. select: list[ThreadSelectField] | None = None,
  4324. headers: Mapping[str, str] | None = None,
  4325. params: QueryParamTypes | None = None,
  4326. ) -> list[Thread]:
  4327. """Search for threads.
  4328. Args:
  4329. metadata: Thread metadata to filter on.
  4330. values: State values to filter on.
  4331. ids: List of thread IDs to filter by.
  4332. status: Thread status to filter on.
  4333. Must be one of 'idle', 'busy', 'interrupted' or 'error'.
  4334. limit: Limit on number of threads to return.
  4335. offset: Offset in threads table to start search from.
  4336. headers: Optional custom headers to include with the request.
  4337. Returns:
  4338. List of the threads matching the search parameters.
  4339. ???+ example "Example Usage"
  4340. ```python
  4341. client = get_sync_client(url="http://localhost:2024")
  4342. threads = client.threads.search(
  4343. metadata={"number":1},
  4344. status="interrupted",
  4345. limit=15,
  4346. offset=5
  4347. )
  4348. ```
  4349. """
  4350. payload: dict[str, Any] = {
  4351. "limit": limit,
  4352. "offset": offset,
  4353. }
  4354. if metadata:
  4355. payload["metadata"] = metadata
  4356. if values:
  4357. payload["values"] = values
  4358. if ids:
  4359. payload["ids"] = ids
  4360. if status:
  4361. payload["status"] = status
  4362. if sort_by:
  4363. payload["sort_by"] = sort_by
  4364. if sort_order:
  4365. payload["sort_order"] = sort_order
  4366. if select:
  4367. payload["select"] = select
  4368. return self.http.post(
  4369. "/threads/search", json=payload, headers=headers, params=params
  4370. )
  4371. def count(
  4372. self,
  4373. *,
  4374. metadata: Json = None,
  4375. values: Json = None,
  4376. status: ThreadStatus | None = None,
  4377. headers: Mapping[str, str] | None = None,
  4378. params: QueryParamTypes | None = None,
  4379. ) -> int:
  4380. """Count threads matching filters.
  4381. Args:
  4382. metadata: Thread metadata to filter on.
  4383. values: State values to filter on.
  4384. status: Thread status to filter on.
  4385. headers: Optional custom headers to include with the request.
  4386. params: Optional query parameters to include with the request.
  4387. Returns:
  4388. int: Number of threads matching the criteria.
  4389. """
  4390. payload: dict[str, Any] = {}
  4391. if metadata:
  4392. payload["metadata"] = metadata
  4393. if values:
  4394. payload["values"] = values
  4395. if status:
  4396. payload["status"] = status
  4397. return self.http.post(
  4398. "/threads/count", json=payload, headers=headers, params=params
  4399. )
  4400. def copy(
  4401. self,
  4402. thread_id: str,
  4403. *,
  4404. headers: Mapping[str, str] | None = None,
  4405. params: QueryParamTypes | None = None,
  4406. ) -> None:
  4407. """Copy a thread.
  4408. Args:
  4409. thread_id: The ID of the thread to copy.
  4410. headers: Optional custom headers to include with the request.
  4411. params: Optional query parameters to include with the request.
  4412. Returns:
  4413. `None`
  4414. ???+ example "Example Usage"
  4415. ```python
  4416. client = get_sync_client(url="http://localhost:2024")
  4417. client.threads.copy(
  4418. thread_id="my_thread_id"
  4419. )
  4420. ```
  4421. """
  4422. return self.http.post(
  4423. f"/threads/{thread_id}/copy", json=None, headers=headers, params=params
  4424. )
  4425. def get_state(
  4426. self,
  4427. thread_id: str,
  4428. checkpoint: Checkpoint | None = None,
  4429. checkpoint_id: str | None = None, # deprecated
  4430. *,
  4431. subgraphs: bool = False,
  4432. headers: Mapping[str, str] | None = None,
  4433. params: QueryParamTypes | None = None,
  4434. ) -> ThreadState:
  4435. """Get the state of a thread.
  4436. Args:
  4437. thread_id: The ID of the thread to get the state of.
  4438. checkpoint: The checkpoint to get the state of.
  4439. subgraphs: Include subgraphs states.
  4440. headers: Optional custom headers to include with the request.
  4441. Returns:
  4442. The thread of the state.
  4443. ???+ example "Example Usage"
  4444. ```python
  4445. client = get_sync_client(url="http://localhost:2024")
  4446. thread_state = client.threads.get_state(
  4447. thread_id="my_thread_id",
  4448. checkpoint_id="my_checkpoint_id"
  4449. )
  4450. print(thread_state)
  4451. ```
  4452. ```shell
  4453. ----------------------------------------------------------------------------------------------------------------------------------------------------------------------
  4454. {
  4455. 'values': {
  4456. 'messages': [
  4457. {
  4458. 'content': 'how are you?',
  4459. 'additional_kwargs': {},
  4460. 'response_metadata': {},
  4461. 'type': 'human',
  4462. 'name': None,
  4463. 'id': 'fe0a5778-cfe9-42ee-b807-0adaa1873c10',
  4464. 'example': False
  4465. },
  4466. {
  4467. 'content': "I'm doing well, thanks for asking! I'm an AI assistant created by Anthropic to be helpful, honest, and harmless.",
  4468. 'additional_kwargs': {},
  4469. 'response_metadata': {},
  4470. 'type': 'ai',
  4471. 'name': None,
  4472. 'id': 'run-159b782c-b679-4830-83c6-cef87798fe8b',
  4473. 'example': False,
  4474. 'tool_calls': [],
  4475. 'invalid_tool_calls': [],
  4476. 'usage_metadata': None
  4477. }
  4478. ]
  4479. },
  4480. 'next': [],
  4481. 'checkpoint':
  4482. {
  4483. 'thread_id': 'e2496803-ecd5-4e0c-a779-3226296181c2',
  4484. 'checkpoint_ns': '',
  4485. 'checkpoint_id': '1ef4a9b8-e6fb-67b1-8001-abd5184439d1'
  4486. }
  4487. 'metadata':
  4488. {
  4489. 'step': 1,
  4490. 'run_id': '1ef4a9b8-d7da-679a-a45a-872054341df2',
  4491. 'source': 'loop',
  4492. 'writes':
  4493. {
  4494. 'agent':
  4495. {
  4496. 'messages': [
  4497. {
  4498. 'id': 'run-159b782c-b679-4830-83c6-cef87798fe8b',
  4499. 'name': None,
  4500. 'type': 'ai',
  4501. 'content': "I'm doing well, thanks for asking! I'm an AI assistant created by Anthropic to be helpful, honest, and harmless.",
  4502. 'example': False,
  4503. 'tool_calls': [],
  4504. 'usage_metadata': None,
  4505. 'additional_kwargs': {},
  4506. 'response_metadata': {},
  4507. 'invalid_tool_calls': []
  4508. }
  4509. ]
  4510. }
  4511. },
  4512. 'user_id': None,
  4513. 'graph_id': 'agent',
  4514. 'thread_id': 'e2496803-ecd5-4e0c-a779-3226296181c2',
  4515. 'created_by': 'system',
  4516. 'assistant_id': 'fe096781-5601-53d2-b2f6-0d3403f7e9ca'},
  4517. 'created_at': '2024-07-25T15:35:44.184703+00:00',
  4518. 'parent_config':
  4519. {
  4520. 'thread_id': 'e2496803-ecd5-4e0c-a779-3226296181c2',
  4521. 'checkpoint_ns': '',
  4522. 'checkpoint_id': '1ef4a9b8-d80d-6fa7-8000-9300467fad0f'
  4523. }
  4524. }
  4525. ```
  4526. """
  4527. if checkpoint:
  4528. return self.http.post(
  4529. f"/threads/{thread_id}/state/checkpoint",
  4530. json={"checkpoint": checkpoint, "subgraphs": subgraphs},
  4531. headers=headers,
  4532. params=params,
  4533. )
  4534. elif checkpoint_id:
  4535. get_params = {"subgraphs": subgraphs}
  4536. if params:
  4537. get_params = {**get_params, **params}
  4538. return self.http.get(
  4539. f"/threads/{thread_id}/state/{checkpoint_id}",
  4540. params=get_params,
  4541. headers=headers,
  4542. )
  4543. else:
  4544. get_params = {"subgraphs": subgraphs}
  4545. if params:
  4546. get_params = {**get_params, **params}
  4547. return self.http.get(
  4548. f"/threads/{thread_id}/state",
  4549. params=get_params,
  4550. headers=headers,
  4551. )
  4552. def update_state(
  4553. self,
  4554. thread_id: str,
  4555. values: dict[str, Any] | Sequence[dict] | None,
  4556. *,
  4557. as_node: str | None = None,
  4558. checkpoint: Checkpoint | None = None,
  4559. checkpoint_id: str | None = None, # deprecated
  4560. headers: Mapping[str, str] | None = None,
  4561. params: QueryParamTypes | None = None,
  4562. ) -> ThreadUpdateStateResponse:
  4563. """Update the state of a thread.
  4564. Args:
  4565. thread_id: The ID of the thread to update.
  4566. values: The values to update the state with.
  4567. as_node: Update the state as if this node had just executed.
  4568. checkpoint: The checkpoint to update the state of.
  4569. headers: Optional custom headers to include with the request.
  4570. Returns:
  4571. Response after updating a thread's state.
  4572. ???+ example "Example Usage"
  4573. ```python
  4574. response = await client.threads.update_state(
  4575. thread_id="my_thread_id",
  4576. values={"messages":[{"role": "user", "content": "hello!"}]},
  4577. as_node="my_node",
  4578. )
  4579. print(response)
  4580. ----------------------------------------------------------------------------------------------------------------------------------------------------------------------
  4581. {
  4582. 'checkpoint': {
  4583. 'thread_id': 'e2496803-ecd5-4e0c-a779-3226296181c2',
  4584. 'checkpoint_ns': '',
  4585. 'checkpoint_id': '1ef4a9b8-e6fb-67b1-8001-abd5184439d1',
  4586. 'checkpoint_map': {}
  4587. }
  4588. }
  4589. ```
  4590. """
  4591. payload: dict[str, Any] = {
  4592. "values": values,
  4593. }
  4594. if checkpoint_id:
  4595. payload["checkpoint_id"] = checkpoint_id
  4596. if checkpoint:
  4597. payload["checkpoint"] = checkpoint
  4598. if as_node:
  4599. payload["as_node"] = as_node
  4600. return self.http.post(
  4601. f"/threads/{thread_id}/state", json=payload, headers=headers, params=params
  4602. )
  4603. def get_history(
  4604. self,
  4605. thread_id: str,
  4606. *,
  4607. limit: int = 10,
  4608. before: str | Checkpoint | None = None,
  4609. metadata: Mapping[str, Any] | None = None,
  4610. checkpoint: Checkpoint | None = None,
  4611. headers: Mapping[str, str] | None = None,
  4612. params: QueryParamTypes | None = None,
  4613. ) -> list[ThreadState]:
  4614. """Get the state history of a thread.
  4615. Args:
  4616. thread_id: The ID of the thread to get the state history for.
  4617. checkpoint: Return states for this subgraph. If empty defaults to root.
  4618. limit: The maximum number of states to return.
  4619. before: Return states before this checkpoint.
  4620. metadata: Filter states by metadata key-value pairs.
  4621. headers: Optional custom headers to include with the request.
  4622. Returns:
  4623. The state history of the `Thread`.
  4624. ???+ example "Example Usage"
  4625. ```python
  4626. thread_state = client.threads.get_history(
  4627. thread_id="my_thread_id",
  4628. limit=5,
  4629. before="my_timestamp",
  4630. metadata={"name":"my_name"}
  4631. )
  4632. ```
  4633. """
  4634. payload: dict[str, Any] = {
  4635. "limit": limit,
  4636. }
  4637. if before:
  4638. payload["before"] = before
  4639. if metadata:
  4640. payload["metadata"] = metadata
  4641. if checkpoint:
  4642. payload["checkpoint"] = checkpoint
  4643. return self.http.post(
  4644. f"/threads/{thread_id}/history",
  4645. json=payload,
  4646. headers=headers,
  4647. params=params,
  4648. )
  4649. def join_stream(
  4650. self,
  4651. thread_id: str,
  4652. *,
  4653. stream_mode: ThreadStreamMode | Sequence[ThreadStreamMode] = "run_modes",
  4654. last_event_id: str | None = None,
  4655. headers: Mapping[str, str] | None = None,
  4656. params: QueryParamTypes | None = None,
  4657. ) -> Iterator[StreamPart]:
  4658. """Get a stream of events for a thread.
  4659. Args:
  4660. thread_id: The ID of the thread to get the stream for.
  4661. last_event_id: The ID of the last event to get.
  4662. headers: Optional custom headers to include with the request.
  4663. params: Optional query parameters to include with the request.
  4664. Returns:
  4665. An iterator of stream parts.
  4666. ???+ example "Example Usage"
  4667. ```python
  4668. for chunk in client.threads.join_stream(
  4669. thread_id="my_thread_id",
  4670. last_event_id="my_event_id",
  4671. stream_mode="run_modes",
  4672. ):
  4673. print(chunk)
  4674. ```
  4675. """
  4676. query_params = {
  4677. "stream_mode": stream_mode,
  4678. }
  4679. if params:
  4680. query_params.update(params)
  4681. return self.http.stream(
  4682. f"/threads/{thread_id}/stream",
  4683. "GET",
  4684. headers={
  4685. **({"Last-Event-ID": last_event_id} if last_event_id else {}),
  4686. **(headers or {}),
  4687. },
  4688. params=query_params,
  4689. )
  4690. class SyncRunsClient:
  4691. """Synchronous client for managing runs in LangGraph.
  4692. This class provides methods to create, retrieve, and manage runs, which represent
  4693. individual executions of graphs.
  4694. ???+ example "Example"
  4695. ```python
  4696. client = get_sync_client(url="http://localhost:2024")
  4697. run = client.runs.create(thread_id="thread_123", assistant_id="asst_456")
  4698. ```
  4699. """
    def __init__(self, http: SyncHttpClient) -> None:
        """Store the shared synchronous HTTP client used for all run requests."""
        self.http = http
  4702. @overload
  4703. def stream(
  4704. self,
  4705. thread_id: str,
  4706. assistant_id: str,
  4707. *,
  4708. input: Input | None = None,
  4709. command: Command | None = None,
  4710. stream_mode: StreamMode | Sequence[StreamMode] = "values",
  4711. stream_subgraphs: bool = False,
  4712. metadata: Mapping[str, Any] | None = None,
  4713. config: Config | None = None,
  4714. context: Context | None = None,
  4715. checkpoint: Checkpoint | None = None,
  4716. checkpoint_id: str | None = None,
  4717. checkpoint_during: bool | None = None,
  4718. interrupt_before: All | Sequence[str] | None = None,
  4719. interrupt_after: All | Sequence[str] | None = None,
  4720. feedback_keys: Sequence[str] | None = None,
  4721. on_disconnect: DisconnectMode | None = None,
  4722. webhook: str | None = None,
  4723. multitask_strategy: MultitaskStrategy | None = None,
  4724. if_not_exists: IfNotExists | None = None,
  4725. after_seconds: int | None = None,
  4726. headers: Mapping[str, str] | None = None,
  4727. params: QueryParamTypes | None = None,
  4728. on_run_created: Callable[[RunCreateMetadata], None] | None = None,
  4729. ) -> Iterator[StreamPart]: ...
  4730. @overload
  4731. def stream(
  4732. self,
  4733. thread_id: None,
  4734. assistant_id: str,
  4735. *,
  4736. input: Input | None = None,
  4737. command: Command | None = None,
  4738. stream_mode: StreamMode | Sequence[StreamMode] = "values",
  4739. stream_subgraphs: bool = False,
  4740. stream_resumable: bool = False,
  4741. metadata: Mapping[str, Any] | None = None,
  4742. config: Config | None = None,
  4743. context: Context | None = None,
  4744. checkpoint_during: bool | None = None,
  4745. interrupt_before: All | Sequence[str] | None = None,
  4746. interrupt_after: All | Sequence[str] | None = None,
  4747. feedback_keys: Sequence[str] | None = None,
  4748. on_disconnect: DisconnectMode | None = None,
  4749. on_completion: OnCompletionBehavior | None = None,
  4750. if_not_exists: IfNotExists | None = None,
  4751. webhook: str | None = None,
  4752. after_seconds: int | None = None,
  4753. headers: Mapping[str, str] | None = None,
  4754. params: QueryParamTypes | None = None,
  4755. on_run_created: Callable[[RunCreateMetadata], None] | None = None,
  4756. ) -> Iterator[StreamPart]: ...
  4757. def stream(
  4758. self,
  4759. thread_id: str | None,
  4760. assistant_id: str,
  4761. *,
  4762. input: Input | None = None,
  4763. command: Command | None = None,
  4764. stream_mode: StreamMode | Sequence[StreamMode] = "values",
  4765. stream_subgraphs: bool = False,
  4766. stream_resumable: bool = False,
  4767. metadata: Mapping[str, Any] | None = None,
  4768. config: Config | None = None,
  4769. context: Context | None = None,
  4770. checkpoint: Checkpoint | None = None,
  4771. checkpoint_id: str | None = None,
  4772. checkpoint_during: bool | None = None, # deprecated
  4773. interrupt_before: All | Sequence[str] | None = None,
  4774. interrupt_after: All | Sequence[str] | None = None,
  4775. feedback_keys: Sequence[str] | None = None,
  4776. on_disconnect: DisconnectMode | None = None,
  4777. on_completion: OnCompletionBehavior | None = None,
  4778. webhook: str | None = None,
  4779. multitask_strategy: MultitaskStrategy | None = None,
  4780. if_not_exists: IfNotExists | None = None,
  4781. after_seconds: int | None = None,
  4782. headers: Mapping[str, str] | None = None,
  4783. params: QueryParamTypes | None = None,
  4784. on_run_created: Callable[[RunCreateMetadata], None] | None = None,
  4785. durability: Durability | None = None,
  4786. ) -> Iterator[StreamPart]:
  4787. """Create a run and stream the results.
  4788. Args:
  4789. thread_id: the thread ID to assign to the thread.
  4790. If `None` will create a stateless run.
  4791. assistant_id: The assistant ID or graph name to stream from.
  4792. If using graph name, will default to first assistant created from that graph.
  4793. input: The input to the graph.
  4794. command: The command to execute.
  4795. stream_mode: The stream mode(s) to use.
  4796. stream_subgraphs: Whether to stream output from subgraphs.
  4797. stream_resumable: Whether the stream is considered resumable.
  4798. If true, the stream can be resumed and replayed in its entirety even after disconnection.
  4799. metadata: Metadata to assign to the run.
  4800. config: The configuration for the assistant.
  4801. context: Static context to add to the assistant.
  4802. !!! version-added "Added in version 0.6.0"
  4803. checkpoint: The checkpoint to resume from.
  4804. checkpoint_during: (deprecated) Whether to checkpoint during the run (or only at the end/interruption).
  4805. interrupt_before: Nodes to interrupt immediately before they get executed.
  4806. interrupt_after: Nodes to Nodes to interrupt immediately after they get executed.
  4807. feedback_keys: Feedback keys to assign to run.
  4808. on_disconnect: The disconnect mode to use.
  4809. Must be one of 'cancel' or 'continue'.
  4810. on_completion: Whether to delete or keep the thread created for a stateless run.
  4811. Must be one of 'delete' or 'keep'.
  4812. webhook: Webhook to call after LangGraph API call is done.
  4813. multitask_strategy: Multitask strategy to use.
  4814. Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
  4815. if_not_exists: How to handle missing thread. Defaults to 'reject'.
  4816. Must be either 'reject' (raise error if missing), or 'create' (create new thread).
  4817. after_seconds: The number of seconds to wait before starting the run.
  4818. Use to schedule future runs.
  4819. headers: Optional custom headers to include with the request.
  4820. on_run_created: Optional callback to call when a run is created.
  4821. durability: The durability to use for the run. Values are "sync", "async", or "exit".
  4822. "async" means checkpoints are persisted async while next graph step executes, replaces checkpoint_during=True
  4823. "sync" means checkpoints are persisted sync after graph step executes, replaces checkpoint_during=False
  4824. "exit" means checkpoints are only persisted when the run exits, does not save intermediate steps
  4825. Returns:
  4826. Iterator of stream results.
  4827. ???+ example "Example Usage"
  4828. ```python
  4829. client = get_sync_client(url="http://localhost:2024")
  4830. async for chunk in client.runs.stream(
  4831. thread_id=None,
  4832. assistant_id="agent",
  4833. input={"messages": [{"role": "user", "content": "how are you?"}]},
  4834. stream_mode=["values","debug"],
  4835. metadata={"name":"my_run"},
  4836. context={"model_name": "anthropic"},
  4837. interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
  4838. interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
  4839. feedback_keys=["my_feedback_key_1","my_feedback_key_2"],
  4840. webhook="https://my.fake.webhook.com",
  4841. multitask_strategy="interrupt"
  4842. ):
  4843. print(chunk)
  4844. ```
  4845. ```shell
  4846. ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  4847. StreamPart(event='metadata', data={'run_id': '1ef4a9b8-d7da-679a-a45a-872054341df2'})
  4848. StreamPart(event='values', data={'messages': [{'content': 'how are you?', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'fe0a5778-cfe9-42ee-b807-0adaa1873c10', 'example': False}]})
  4849. StreamPart(event='values', data={'messages': [{'content': 'how are you?', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'fe0a5778-cfe9-42ee-b807-0adaa1873c10', 'example': False}, {'content': "I'm doing well, thanks for asking! I'm an AI assistant created by Anthropic to be helpful, honest, and harmless.", 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'ai', 'name': None, 'id': 'run-159b782c-b679-4830-83c6-cef87798fe8b', 'example': False, 'tool_calls': [], 'invalid_tool_calls': [], 'usage_metadata': None}]})
  4850. StreamPart(event='end', data=None)
  4851. ```
  4852. """
  4853. if checkpoint_during is not None:
  4854. warnings.warn(
  4855. "`checkpoint_during` is deprecated and will be removed in a future version. Use `durability` instead.",
  4856. DeprecationWarning,
  4857. stacklevel=2,
  4858. )
  4859. payload = {
  4860. "input": input,
  4861. "command": (
  4862. {k: v for k, v in command.items() if v is not None} if command else None
  4863. ),
  4864. "config": config,
  4865. "context": context,
  4866. "metadata": metadata,
  4867. "stream_mode": stream_mode,
  4868. "stream_subgraphs": stream_subgraphs,
  4869. "stream_resumable": stream_resumable,
  4870. "assistant_id": assistant_id,
  4871. "interrupt_before": interrupt_before,
  4872. "interrupt_after": interrupt_after,
  4873. "feedback_keys": feedback_keys,
  4874. "webhook": webhook,
  4875. "checkpoint": checkpoint,
  4876. "checkpoint_id": checkpoint_id,
  4877. "checkpoint_during": checkpoint_during,
  4878. "multitask_strategy": multitask_strategy,
  4879. "if_not_exists": if_not_exists,
  4880. "on_disconnect": on_disconnect,
  4881. "on_completion": on_completion,
  4882. "after_seconds": after_seconds,
  4883. "durability": durability,
  4884. }
  4885. endpoint = (
  4886. f"/threads/{thread_id}/runs/stream"
  4887. if thread_id is not None
  4888. else "/runs/stream"
  4889. )
  4890. def on_response(res: httpx.Response):
  4891. """Callback function to handle the response."""
  4892. if on_run_created and (metadata := _get_run_metadata_from_response(res)):
  4893. on_run_created(metadata)
  4894. return self.http.stream(
  4895. endpoint,
  4896. "POST",
  4897. json={k: v for k, v in payload.items() if v is not None},
  4898. params=params,
  4899. headers=headers,
  4900. on_response=on_response if on_run_created else None,
  4901. )
  4902. @overload
  4903. def create(
  4904. self,
  4905. thread_id: None,
  4906. assistant_id: str,
  4907. *,
  4908. input: Input | None = None,
  4909. command: Command | None = None,
  4910. stream_mode: StreamMode | Sequence[StreamMode] = "values",
  4911. stream_subgraphs: bool = False,
  4912. stream_resumable: bool = False,
  4913. metadata: Mapping[str, Any] | None = None,
  4914. config: Config | None = None,
  4915. context: Context | None = None,
  4916. checkpoint_during: bool | None = None,
  4917. interrupt_before: All | Sequence[str] | None = None,
  4918. interrupt_after: All | Sequence[str] | None = None,
  4919. webhook: str | None = None,
  4920. on_completion: OnCompletionBehavior | None = None,
  4921. if_not_exists: IfNotExists | None = None,
  4922. after_seconds: int | None = None,
  4923. headers: Mapping[str, str] | None = None,
  4924. params: QueryParamTypes | None = None,
  4925. on_run_created: Callable[[RunCreateMetadata], None] | None = None,
  4926. ) -> Run: ...
  4927. @overload
  4928. def create(
  4929. self,
  4930. thread_id: str,
  4931. assistant_id: str,
  4932. *,
  4933. input: Input | None = None,
  4934. command: Command | None = None,
  4935. stream_mode: StreamMode | Sequence[StreamMode] = "values",
  4936. stream_subgraphs: bool = False,
  4937. stream_resumable: bool = False,
  4938. metadata: Mapping[str, Any] | None = None,
  4939. config: Config | None = None,
  4940. context: Context | None = None,
  4941. checkpoint: Checkpoint | None = None,
  4942. checkpoint_id: str | None = None,
  4943. checkpoint_during: bool | None = None,
  4944. interrupt_before: All | Sequence[str] | None = None,
  4945. interrupt_after: All | Sequence[str] | None = None,
  4946. webhook: str | None = None,
  4947. multitask_strategy: MultitaskStrategy | None = None,
  4948. if_not_exists: IfNotExists | None = None,
  4949. after_seconds: int | None = None,
  4950. headers: Mapping[str, str] | None = None,
  4951. params: QueryParamTypes | None = None,
  4952. on_run_created: Callable[[RunCreateMetadata], None] | None = None,
  4953. ) -> Run: ...
  4954. def create(
  4955. self,
  4956. thread_id: str | None,
  4957. assistant_id: str,
  4958. *,
  4959. input: Input | None = None,
  4960. command: Command | None = None,
  4961. stream_mode: StreamMode | Sequence[StreamMode] = "values",
  4962. stream_subgraphs: bool = False,
  4963. stream_resumable: bool = False,
  4964. metadata: Mapping[str, Any] | None = None,
  4965. config: Config | None = None,
  4966. context: Context | None = None,
  4967. checkpoint: Checkpoint | None = None,
  4968. checkpoint_id: str | None = None,
  4969. checkpoint_during: bool | None = None, # deprecated
  4970. interrupt_before: All | Sequence[str] | None = None,
  4971. interrupt_after: All | Sequence[str] | None = None,
  4972. webhook: str | None = None,
  4973. multitask_strategy: MultitaskStrategy | None = None,
  4974. if_not_exists: IfNotExists | None = None,
  4975. on_completion: OnCompletionBehavior | None = None,
  4976. after_seconds: int | None = None,
  4977. headers: Mapping[str, str] | None = None,
  4978. params: QueryParamTypes | None = None,
  4979. on_run_created: Callable[[RunCreateMetadata], None] | None = None,
  4980. durability: Durability | None = None,
  4981. ) -> Run:
  4982. """Create a background run.
  4983. Args:
  4984. thread_id: the thread ID to assign to the thread.
  4985. If `None` will create a stateless run.
  4986. assistant_id: The assistant ID or graph name to stream from.
  4987. If using graph name, will default to first assistant created from that graph.
  4988. input: The input to the graph.
  4989. command: The command to execute.
  4990. stream_mode: The stream mode(s) to use.
  4991. stream_subgraphs: Whether to stream output from subgraphs.
  4992. stream_resumable: Whether the stream is considered resumable.
  4993. If true, the stream can be resumed and replayed in its entirety even after disconnection.
  4994. metadata: Metadata to assign to the run.
  4995. config: The configuration for the assistant.
  4996. context: Static context to add to the assistant.
  4997. !!! version-added "Added in version 0.6.0"
  4998. checkpoint: The checkpoint to resume from.
  4999. checkpoint_during: (deprecated) Whether to checkpoint during the run (or only at the end/interruption).
  5000. interrupt_before: Nodes to interrupt immediately before they get executed.
  5001. interrupt_after: Nodes to Nodes to interrupt immediately after they get executed.
  5002. webhook: Webhook to call after LangGraph API call is done.
  5003. multitask_strategy: Multitask strategy to use.
  5004. Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
  5005. on_completion: Whether to delete or keep the thread created for a stateless run.
  5006. Must be one of 'delete' or 'keep'.
  5007. if_not_exists: How to handle missing thread. Defaults to 'reject'.
  5008. Must be either 'reject' (raise error if missing), or 'create' (create new thread).
  5009. after_seconds: The number of seconds to wait before starting the run.
  5010. Use to schedule future runs.
  5011. headers: Optional custom headers to include with the request.
  5012. on_run_created: Optional callback to call when a run is created.
  5013. durability: The durability to use for the run. Values are "sync", "async", or "exit".
  5014. "async" means checkpoints are persisted async while next graph step executes, replaces checkpoint_during=True
  5015. "sync" means checkpoints are persisted sync after graph step executes, replaces checkpoint_during=False
  5016. "exit" means checkpoints are only persisted when the run exits, does not save intermediate steps
  5017. Returns:
  5018. The created background `Run`.
  5019. ???+ example "Example Usage"
  5020. ```python
  5021. client = get_sync_client(url="http://localhost:2024")
  5022. background_run = client.runs.create(
  5023. thread_id="my_thread_id",
  5024. assistant_id="my_assistant_id",
  5025. input={"messages": [{"role": "user", "content": "hello!"}]},
  5026. metadata={"name":"my_run"},
  5027. context={"model_name": "openai"},
  5028. interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
  5029. interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
  5030. webhook="https://my.fake.webhook.com",
  5031. multitask_strategy="interrupt"
  5032. )
  5033. print(background_run)
  5034. ```
  5035. ```shell
  5036. --------------------------------------------------------------------------------
  5037. {
  5038. 'run_id': 'my_run_id',
  5039. 'thread_id': 'my_thread_id',
  5040. 'assistant_id': 'my_assistant_id',
  5041. 'created_at': '2024-07-25T15:35:42.598503+00:00',
  5042. 'updated_at': '2024-07-25T15:35:42.598503+00:00',
  5043. 'metadata': {},
  5044. 'status': 'pending',
  5045. 'kwargs':
  5046. {
  5047. 'input':
  5048. {
  5049. 'messages': [
  5050. {
  5051. 'role': 'user',
  5052. 'content': 'how are you?'
  5053. }
  5054. ]
  5055. },
  5056. 'config':
  5057. {
  5058. 'metadata':
  5059. {
  5060. 'created_by': 'system'
  5061. },
  5062. 'configurable':
  5063. {
  5064. 'run_id': 'my_run_id',
  5065. 'user_id': None,
  5066. 'graph_id': 'agent',
  5067. 'thread_id': 'my_thread_id',
  5068. 'checkpoint_id': None,
  5069. 'assistant_id': 'my_assistant_id'
  5070. }
  5071. },
  5072. 'context':
  5073. {
  5074. 'model_name': 'openai'
  5075. },
  5076. 'webhook': "https://my.fake.webhook.com",
  5077. 'temporary': False,
  5078. 'stream_mode': ['values'],
  5079. 'feedback_keys': None,
  5080. 'interrupt_after': ["node_to_stop_after_1","node_to_stop_after_2"],
  5081. 'interrupt_before': ["node_to_stop_before_1","node_to_stop_before_2"]
  5082. },
  5083. 'multitask_strategy': 'interrupt'
  5084. }
  5085. ```
  5086. """
  5087. if checkpoint_during is not None:
  5088. warnings.warn(
  5089. "`checkpoint_during` is deprecated and will be removed in a future version. Use `durability` instead.",
  5090. DeprecationWarning,
  5091. stacklevel=2,
  5092. )
  5093. payload = {
  5094. "input": input,
  5095. "command": (
  5096. {k: v for k, v in command.items() if v is not None} if command else None
  5097. ),
  5098. "stream_mode": stream_mode,
  5099. "stream_subgraphs": stream_subgraphs,
  5100. "stream_resumable": stream_resumable,
  5101. "config": config,
  5102. "context": context,
  5103. "metadata": metadata,
  5104. "assistant_id": assistant_id,
  5105. "interrupt_before": interrupt_before,
  5106. "interrupt_after": interrupt_after,
  5107. "webhook": webhook,
  5108. "checkpoint": checkpoint,
  5109. "checkpoint_id": checkpoint_id,
  5110. "checkpoint_during": checkpoint_during,
  5111. "multitask_strategy": multitask_strategy,
  5112. "if_not_exists": if_not_exists,
  5113. "on_completion": on_completion,
  5114. "after_seconds": after_seconds,
  5115. "durability": durability,
  5116. }
  5117. payload = {k: v for k, v in payload.items() if v is not None}
  5118. def on_response(res: httpx.Response):
  5119. """Callback function to handle the response."""
  5120. if on_run_created and (metadata := _get_run_metadata_from_response(res)):
  5121. on_run_created(metadata)
  5122. return self.http.post(
  5123. f"/threads/{thread_id}/runs" if thread_id else "/runs",
  5124. json=payload,
  5125. params=params,
  5126. headers=headers,
  5127. on_response=on_response if on_run_created else None,
  5128. )
  5129. def create_batch(
  5130. self,
  5131. payloads: list[RunCreate],
  5132. *,
  5133. headers: Mapping[str, str] | None = None,
  5134. params: QueryParamTypes | None = None,
  5135. ) -> list[Run]:
  5136. """Create a batch of stateless background runs."""
  5137. def filter_payload(payload: RunCreate):
  5138. return {k: v for k, v in payload.items() if v is not None}
  5139. filtered = [filter_payload(payload) for payload in payloads]
  5140. return self.http.post(
  5141. "/runs/batch", json=filtered, headers=headers, params=params
  5142. )
  5143. @overload
  5144. def wait(
  5145. self,
  5146. thread_id: str,
  5147. assistant_id: str,
  5148. *,
  5149. input: Input | None = None,
  5150. command: Command | None = None,
  5151. metadata: Mapping[str, Any] | None = None,
  5152. config: Config | None = None,
  5153. context: Context | None = None,
  5154. checkpoint: Checkpoint | None = None,
  5155. checkpoint_id: str | None = None,
  5156. checkpoint_during: bool | None = None,
  5157. interrupt_before: All | Sequence[str] | None = None,
  5158. interrupt_after: All | Sequence[str] | None = None,
  5159. webhook: str | None = None,
  5160. on_disconnect: DisconnectMode | None = None,
  5161. multitask_strategy: MultitaskStrategy | None = None,
  5162. if_not_exists: IfNotExists | None = None,
  5163. after_seconds: int | None = None,
  5164. raise_error: bool = True,
  5165. headers: Mapping[str, str] | None = None,
  5166. params: QueryParamTypes | None = None,
  5167. on_run_created: Callable[[RunCreateMetadata], None] | None = None,
  5168. ) -> list[dict] | dict[str, Any]: ...
  5169. @overload
  5170. def wait(
  5171. self,
  5172. thread_id: None,
  5173. assistant_id: str,
  5174. *,
  5175. input: Input | None = None,
  5176. command: Command | None = None,
  5177. metadata: Mapping[str, Any] | None = None,
  5178. config: Config | None = None,
  5179. context: Context | None = None,
  5180. checkpoint_during: bool | None = None,
  5181. interrupt_before: All | Sequence[str] | None = None,
  5182. interrupt_after: All | Sequence[str] | None = None,
  5183. webhook: str | None = None,
  5184. on_disconnect: DisconnectMode | None = None,
  5185. on_completion: OnCompletionBehavior | None = None,
  5186. if_not_exists: IfNotExists | None = None,
  5187. after_seconds: int | None = None,
  5188. raise_error: bool = True,
  5189. headers: Mapping[str, str] | None = None,
  5190. params: QueryParamTypes | None = None,
  5191. on_run_created: Callable[[RunCreateMetadata], None] | None = None,
  5192. ) -> list[dict] | dict[str, Any]: ...
  5193. def wait(
  5194. self,
  5195. thread_id: str | None,
  5196. assistant_id: str,
  5197. *,
  5198. input: Input | None = None,
  5199. command: Command | None = None,
  5200. metadata: Mapping[str, Any] | None = None,
  5201. config: Config | None = None,
  5202. context: Context | None = None,
  5203. checkpoint_during: bool | None = None, # deprecated
  5204. checkpoint: Checkpoint | None = None,
  5205. checkpoint_id: str | None = None,
  5206. interrupt_before: All | Sequence[str] | None = None,
  5207. interrupt_after: All | Sequence[str] | None = None,
  5208. webhook: str | None = None,
  5209. on_disconnect: DisconnectMode | None = None,
  5210. on_completion: OnCompletionBehavior | None = None,
  5211. multitask_strategy: MultitaskStrategy | None = None,
  5212. if_not_exists: IfNotExists | None = None,
  5213. after_seconds: int | None = None,
  5214. raise_error: bool = True,
  5215. headers: Mapping[str, str] | None = None,
  5216. params: QueryParamTypes | None = None,
  5217. on_run_created: Callable[[RunCreateMetadata], None] | None = None,
  5218. durability: Durability | None = None,
  5219. ) -> list[dict] | dict[str, Any]:
  5220. """Create a run, wait until it finishes and return the final state.
  5221. Args:
  5222. thread_id: the thread ID to create the run on.
  5223. If `None` will create a stateless run.
  5224. assistant_id: The assistant ID or graph name to run.
  5225. If using graph name, will default to first assistant created from that graph.
  5226. input: The input to the graph.
  5227. command: The command to execute.
  5228. metadata: Metadata to assign to the run.
  5229. config: The configuration for the assistant.
  5230. context: Static context to add to the assistant.
  5231. !!! version-added "Added in version 0.6.0"
  5232. checkpoint: The checkpoint to resume from.
  5233. checkpoint_during: (deprecated) Whether to checkpoint during the run (or only at the end/interruption).
  5234. interrupt_before: Nodes to interrupt immediately before they get executed.
  5235. interrupt_after: Nodes to Nodes to interrupt immediately after they get executed.
  5236. webhook: Webhook to call after LangGraph API call is done.
  5237. on_disconnect: The disconnect mode to use.
  5238. Must be one of 'cancel' or 'continue'.
  5239. on_completion: Whether to delete or keep the thread created for a stateless run.
  5240. Must be one of 'delete' or 'keep'.
  5241. multitask_strategy: Multitask strategy to use.
  5242. Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
  5243. if_not_exists: How to handle missing thread. Defaults to 'reject'.
  5244. Must be either 'reject' (raise error if missing), or 'create' (create new thread).
  5245. after_seconds: The number of seconds to wait before starting the run.
  5246. Use to schedule future runs.
  5247. raise_error: Whether to raise an error if the run fails.
  5248. headers: Optional custom headers to include with the request.
  5249. on_run_created: Optional callback to call when a run is created.
  5250. durability: The durability to use for the run. Values are "sync", "async", or "exit".
  5251. "async" means checkpoints are persisted async while next graph step executes, replaces checkpoint_during=True
  5252. "sync" means checkpoints are persisted sync after graph step executes, replaces checkpoint_during=False
  5253. "exit" means checkpoints are only persisted when the run exits, does not save intermediate steps
  5254. Returns:
  5255. The output of the `Run`.
  5256. ???+ example "Example Usage"
  5257. ```python
  5258. final_state_of_run = client.runs.wait(
  5259. thread_id=None,
  5260. assistant_id="agent",
  5261. input={"messages": [{"role": "user", "content": "how are you?"}]},
  5262. metadata={"name":"my_run"},
  5263. context={"model_name": "anthropic"},
  5264. interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
  5265. interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
  5266. webhook="https://my.fake.webhook.com",
  5267. multitask_strategy="interrupt"
  5268. )
  5269. print(final_state_of_run)
  5270. ```
  5271. ```shell
  5272. -------------------------------------------------------------------------------------------------------------------------------------------
  5273. {
  5274. 'messages': [
  5275. {
  5276. 'content': 'how are you?',
  5277. 'additional_kwargs': {},
  5278. 'response_metadata': {},
  5279. 'type': 'human',
  5280. 'name': None,
  5281. 'id': 'f51a862c-62fe-4866-863b-b0863e8ad78a',
  5282. 'example': False
  5283. },
  5284. {
  5285. 'content': "I'm doing well, thanks for asking! I'm an AI assistant created by Anthropic to be helpful, honest, and harmless.",
  5286. 'additional_kwargs': {},
  5287. 'response_metadata': {},
  5288. 'type': 'ai',
  5289. 'name': None,
  5290. 'id': 'run-bf1cd3c6-768f-4c16-b62d-ba6f17ad8b36',
  5291. 'example': False,
  5292. 'tool_calls': [],
  5293. 'invalid_tool_calls': [],
  5294. 'usage_metadata': None
  5295. }
  5296. ]
  5297. }
  5298. ```
  5299. """
  5300. if checkpoint_during is not None:
  5301. warnings.warn(
  5302. "`checkpoint_during` is deprecated and will be removed in a future version. Use `durability` instead.",
  5303. DeprecationWarning,
  5304. stacklevel=2,
  5305. )
  5306. payload = {
  5307. "input": input,
  5308. "command": (
  5309. {k: v for k, v in command.items() if v is not None} if command else None
  5310. ),
  5311. "config": config,
  5312. "context": context,
  5313. "metadata": metadata,
  5314. "assistant_id": assistant_id,
  5315. "interrupt_before": interrupt_before,
  5316. "interrupt_after": interrupt_after,
  5317. "webhook": webhook,
  5318. "checkpoint": checkpoint,
  5319. "checkpoint_id": checkpoint_id,
  5320. "multitask_strategy": multitask_strategy,
  5321. "if_not_exists": if_not_exists,
  5322. "on_disconnect": on_disconnect,
  5323. "checkpoint_during": checkpoint_during,
  5324. "on_completion": on_completion,
  5325. "after_seconds": after_seconds,
  5326. "raise_error": raise_error,
  5327. "durability": durability,
  5328. }
  5329. def on_response(res: httpx.Response):
  5330. """Callback function to handle the response."""
  5331. if on_run_created and (metadata := _get_run_metadata_from_response(res)):
  5332. on_run_created(metadata)
  5333. endpoint = (
  5334. f"/threads/{thread_id}/runs/wait" if thread_id is not None else "/runs/wait"
  5335. )
  5336. return self.http.request_reconnect(
  5337. endpoint,
  5338. "POST",
  5339. json={k: v for k, v in payload.items() if v is not None},
  5340. params=params,
  5341. headers=headers,
  5342. on_response=on_response if on_run_created else None,
  5343. )
  5344. def list(
  5345. self,
  5346. thread_id: str,
  5347. *,
  5348. limit: int = 10,
  5349. offset: int = 0,
  5350. status: RunStatus | None = None,
  5351. select: list[RunSelectField] | None = None,
  5352. headers: Mapping[str, str] | None = None,
  5353. params: QueryParamTypes | None = None,
  5354. ) -> list[Run]:
  5355. """List runs.
  5356. Args:
  5357. thread_id: The thread ID to list runs for.
  5358. limit: The maximum number of results to return.
  5359. offset: The number of results to skip.
  5360. headers: Optional custom headers to include with the request.
  5361. params: Optional query parameters to include with the request.
  5362. Returns:
  5363. The runs for the thread.
  5364. ???+ example "Example Usage"
  5365. ```python
  5366. client = get_sync_client(url="http://localhost:2024")
  5367. client.runs.list(
  5368. thread_id="thread_id",
  5369. limit=5,
  5370. offset=5,
  5371. )
  5372. ```
  5373. """
  5374. query_params: dict[str, Any] = {"limit": limit, "offset": offset}
  5375. if status is not None:
  5376. query_params["status"] = status
  5377. if select:
  5378. query_params["select"] = select
  5379. if params:
  5380. query_params.update(params)
  5381. return self.http.get(
  5382. f"/threads/{thread_id}/runs", params=query_params, headers=headers
  5383. )
  5384. def get(
  5385. self,
  5386. thread_id: str,
  5387. run_id: str,
  5388. *,
  5389. headers: Mapping[str, str] | None = None,
  5390. params: QueryParamTypes | None = None,
  5391. ) -> Run:
  5392. """Get a run.
  5393. Args:
  5394. thread_id: The thread ID to get.
  5395. run_id: The run ID to get.
  5396. headers: Optional custom headers to include with the request.
  5397. Returns:
  5398. `Run` object.
  5399. ???+ example "Example Usage"
  5400. ```python
  5401. run = client.runs.get(
  5402. thread_id="thread_id_to_delete",
  5403. run_id="run_id_to_delete",
  5404. )
  5405. ```
  5406. """
  5407. return self.http.get(
  5408. f"/threads/{thread_id}/runs/{run_id}", headers=headers, params=params
  5409. )
  5410. def cancel(
  5411. self,
  5412. thread_id: str,
  5413. run_id: str,
  5414. *,
  5415. wait: bool = False,
  5416. action: CancelAction = "interrupt",
  5417. headers: Mapping[str, str] | None = None,
  5418. params: QueryParamTypes | None = None,
  5419. ) -> None:
  5420. """Get a run.
  5421. Args:
  5422. thread_id: The thread ID to cancel.
  5423. run_id: The run ID to cancel.
  5424. wait: Whether to wait until run has completed.
  5425. action: Action to take when cancelling the run. Possible values
  5426. are `interrupt` or `rollback`. Default is `interrupt`.
  5427. headers: Optional custom headers to include with the request.
  5428. params: Optional query parameters to include with the request.
  5429. Returns:
  5430. `None`
  5431. ???+ example "Example Usage"
  5432. ```python
  5433. client = get_sync_client(url="http://localhost:2024")
  5434. client.runs.cancel(
  5435. thread_id="thread_id_to_cancel",
  5436. run_id="run_id_to_cancel",
  5437. wait=True,
  5438. action="interrupt"
  5439. )
  5440. ```
  5441. """
  5442. query_params = {
  5443. "wait": 1 if wait else 0,
  5444. "action": action,
  5445. }
  5446. if params:
  5447. query_params.update(params)
  5448. if wait:
  5449. return self.http.request_reconnect(
  5450. f"/threads/{thread_id}/runs/{run_id}/cancel",
  5451. "POST",
  5452. json=None,
  5453. params=query_params,
  5454. headers=headers,
  5455. )
  5456. return self.http.post(
  5457. f"/threads/{thread_id}/runs/{run_id}/cancel",
  5458. json=None,
  5459. params=query_params,
  5460. headers=headers,
  5461. )
  5462. def join(
  5463. self,
  5464. thread_id: str,
  5465. run_id: str,
  5466. *,
  5467. headers: Mapping[str, str] | None = None,
  5468. params: QueryParamTypes | None = None,
  5469. ) -> dict:
  5470. """Block until a run is done. Returns the final state of the thread.
  5471. Args:
  5472. thread_id: The thread ID to join.
  5473. run_id: The run ID to join.
  5474. headers: Optional custom headers to include with the request.
  5475. params: Optional query parameters to include with the request.
  5476. Returns:
  5477. `None`
  5478. ???+ example "Example Usage"
  5479. ```python
  5480. client = get_sync_client(url="http://localhost:2024")
  5481. client.runs.join(
  5482. thread_id="thread_id_to_join",
  5483. run_id="run_id_to_join"
  5484. )
  5485. ```
  5486. """
  5487. return self.http.request_reconnect(
  5488. f"/threads/{thread_id}/runs/{run_id}/join",
  5489. "GET",
  5490. headers=headers,
  5491. params=params,
  5492. )
  5493. def join_stream(
  5494. self,
  5495. thread_id: str,
  5496. run_id: str,
  5497. *,
  5498. cancel_on_disconnect: bool = False,
  5499. stream_mode: StreamMode | Sequence[StreamMode] | None = None,
  5500. headers: Mapping[str, str] | None = None,
  5501. params: QueryParamTypes | None = None,
  5502. last_event_id: str | None = None,
  5503. ) -> Iterator[StreamPart]:
  5504. """Stream output from a run in real-time, until the run is done.
  5505. Output is not buffered, so any output produced before this call will
  5506. not be received here.
  5507. Args:
  5508. thread_id: The thread ID to join.
  5509. run_id: The run ID to join.
  5510. stream_mode: The stream mode(s) to use. Must be a subset of the stream modes passed
  5511. when creating the run. Background runs default to having the union of all
  5512. stream modes.
  5513. cancel_on_disconnect: Whether to cancel the run when the stream is disconnected.
  5514. headers: Optional custom headers to include with the request.
  5515. params: Optional query parameters to include with the request.
  5516. last_event_id: The last event ID to use for the stream.
  5517. Returns:
  5518. `None`
  5519. ???+ example "Example Usage"
  5520. ```python
  5521. client = get_sync_client(url="http://localhost:2024")
  5522. client.runs.join_stream(
  5523. thread_id="thread_id_to_join",
  5524. run_id="run_id_to_join",
  5525. stream_mode=["values", "debug"]
  5526. )
  5527. ```
  5528. """
  5529. query_params = {
  5530. "stream_mode": stream_mode,
  5531. "cancel_on_disconnect": cancel_on_disconnect,
  5532. }
  5533. if params:
  5534. query_params.update(params)
  5535. return self.http.stream(
  5536. f"/threads/{thread_id}/runs/{run_id}/stream",
  5537. "GET",
  5538. params=query_params,
  5539. headers={
  5540. **({"Last-Event-ID": last_event_id} if last_event_id else {}),
  5541. **(headers or {}),
  5542. }
  5543. or None,
  5544. )
  5545. def delete(
  5546. self,
  5547. thread_id: str,
  5548. run_id: str,
  5549. *,
  5550. headers: Mapping[str, str] | None = None,
  5551. params: QueryParamTypes | None = None,
  5552. ) -> None:
  5553. """Delete a run.
  5554. Args:
  5555. thread_id: The thread ID to delete.
  5556. run_id: The run ID to delete.
  5557. headers: Optional custom headers to include with the request.
  5558. params: Optional query parameters to include with the request.
  5559. Returns:
  5560. `None`
  5561. ???+ example "Example Usage"
  5562. ```python
  5563. client = get_sync_client(url="http://localhost:2024")
  5564. client.runs.delete(
  5565. thread_id="thread_id_to_delete",
  5566. run_id="run_id_to_delete"
  5567. )
  5568. ```
  5569. """
  5570. self.http.delete(
  5571. f"/threads/{thread_id}/runs/{run_id}", headers=headers, params=params
  5572. )
  5573. class SyncCronClient:
  5574. """Synchronous client for managing cron jobs in LangGraph.
  5575. This class provides methods to create and manage scheduled tasks (cron jobs) for automated graph executions.
  5576. ???+ example "Example"
  5577. ```python
  5578. client = get_sync_client(url="http://localhost:8123")
  5579. cron_job = client.crons.create_for_thread(thread_id="thread_123", assistant_id="asst_456", schedule="0 * * * *")
  5580. ```
  5581. !!! note "Feature Availability"
  5582. The crons client functionality is not supported on all licenses.
  5583. Please check the relevant license documentation for the most up-to-date
  5584. details on feature availability.
  5585. """
  5586. def __init__(self, http_client: SyncHttpClient) -> None:
  5587. self.http = http_client
  5588. def create_for_thread(
  5589. self,
  5590. thread_id: str,
  5591. assistant_id: str,
  5592. *,
  5593. schedule: str,
  5594. input: Input | None = None,
  5595. metadata: Mapping[str, Any] | None = None,
  5596. config: Config | None = None,
  5597. context: Context | None = None,
  5598. checkpoint_during: bool | None = None,
  5599. interrupt_before: All | list[str] | None = None,
  5600. interrupt_after: All | list[str] | None = None,
  5601. webhook: str | None = None,
  5602. multitask_strategy: str | None = None,
  5603. headers: Mapping[str, str] | None = None,
  5604. params: QueryParamTypes | None = None,
  5605. ) -> Run:
  5606. """Create a cron job for a thread.
  5607. Args:
  5608. thread_id: the thread ID to run the cron job on.
  5609. assistant_id: The assistant ID or graph name to use for the cron job.
  5610. If using graph name, will default to first assistant created from that graph.
  5611. schedule: The cron schedule to execute this job on.
  5612. input: The input to the graph.
  5613. metadata: Metadata to assign to the cron job runs.
  5614. config: The configuration for the assistant.
  5615. context: Static context to add to the assistant.
  5616. !!! version-added "Added in version 0.6.0"
  5617. checkpoint_during: Whether to checkpoint during the run (or only at the end/interruption).
  5618. interrupt_before: Nodes to interrupt immediately before they get executed.
  5619. interrupt_after: Nodes to Nodes to interrupt immediately after they get executed.
  5620. webhook: Webhook to call after LangGraph API call is done.
  5621. multitask_strategy: Multitask strategy to use.
  5622. Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
  5623. headers: Optional custom headers to include with the request.
  5624. Returns:
  5625. The cron `Run`.
  5626. ???+ example "Example Usage"
  5627. ```python
  5628. client = get_sync_client(url="http://localhost:8123")
  5629. cron_run = client.crons.create_for_thread(
  5630. thread_id="my-thread-id",
  5631. assistant_id="agent",
  5632. schedule="27 15 * * *",
  5633. input={"messages": [{"role": "user", "content": "hello!"}]},
  5634. metadata={"name":"my_run"},
  5635. context={"model_name": "openai"},
  5636. interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
  5637. interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
  5638. webhook="https://my.fake.webhook.com",
  5639. multitask_strategy="interrupt"
  5640. )
  5641. ```
  5642. """
  5643. payload = {
  5644. "schedule": schedule,
  5645. "input": input,
  5646. "config": config,
  5647. "metadata": metadata,
  5648. "context": context,
  5649. "assistant_id": assistant_id,
  5650. "interrupt_before": interrupt_before,
  5651. "interrupt_after": interrupt_after,
  5652. "checkpoint_during": checkpoint_during,
  5653. "webhook": webhook,
  5654. "multitask_strategy": multitask_strategy,
  5655. }
  5656. payload = {k: v for k, v in payload.items() if v is not None}
  5657. return self.http.post(
  5658. f"/threads/{thread_id}/runs/crons",
  5659. json=payload,
  5660. headers=headers,
  5661. params=params,
  5662. )
  5663. def create(
  5664. self,
  5665. assistant_id: str,
  5666. *,
  5667. schedule: str,
  5668. input: Input | None = None,
  5669. metadata: Mapping[str, Any] | None = None,
  5670. config: Config | None = None,
  5671. context: Context | None = None,
  5672. checkpoint_during: bool | None = None,
  5673. interrupt_before: All | list[str] | None = None,
  5674. interrupt_after: All | list[str] | None = None,
  5675. webhook: str | None = None,
  5676. multitask_strategy: str | None = None,
  5677. headers: Mapping[str, str] | None = None,
  5678. params: QueryParamTypes | None = None,
  5679. ) -> Run:
  5680. """Create a cron run.
  5681. Args:
  5682. assistant_id: The assistant ID or graph name to use for the cron job.
  5683. If using graph name, will default to first assistant created from that graph.
  5684. schedule: The cron schedule to execute this job on.
  5685. input: The input to the graph.
  5686. metadata: Metadata to assign to the cron job runs.
  5687. config: The configuration for the assistant.
  5688. context: Static context to add to the assistant.
  5689. !!! version-added "Added in version 0.6.0"
  5690. checkpoint_during: Whether to checkpoint during the run (or only at the end/interruption).
  5691. interrupt_before: Nodes to interrupt immediately before they get executed.
  5692. interrupt_after: Nodes to Nodes to interrupt immediately after they get executed.
  5693. webhook: Webhook to call after LangGraph API call is done.
  5694. multitask_strategy: Multitask strategy to use.
  5695. Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
  5696. headers: Optional custom headers to include with the request.
  5697. Returns:
  5698. The cron `Run`.
  5699. ???+ example "Example Usage"
  5700. ```python
  5701. client = get_sync_client(url="http://localhost:8123")
  5702. cron_run = client.crons.create(
  5703. assistant_id="agent",
  5704. schedule="27 15 * * *",
  5705. input={"messages": [{"role": "user", "content": "hello!"}]},
  5706. metadata={"name":"my_run"},
  5707. context={"model_name": "openai"},
  5708. checkpoint_during=True,
  5709. interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
  5710. interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
  5711. webhook="https://my.fake.webhook.com",
  5712. multitask_strategy="interrupt"
  5713. )
  5714. ```
  5715. """
  5716. payload = {
  5717. "schedule": schedule,
  5718. "input": input,
  5719. "config": config,
  5720. "metadata": metadata,
  5721. "context": context,
  5722. "assistant_id": assistant_id,
  5723. "interrupt_before": interrupt_before,
  5724. "interrupt_after": interrupt_after,
  5725. "webhook": webhook,
  5726. "checkpoint_during": checkpoint_during,
  5727. "multitask_strategy": multitask_strategy,
  5728. }
  5729. payload = {k: v for k, v in payload.items() if v is not None}
  5730. return self.http.post(
  5731. "/runs/crons", json=payload, headers=headers, params=params
  5732. )
  5733. def delete(
  5734. self,
  5735. cron_id: str,
  5736. *,
  5737. headers: Mapping[str, str] | None = None,
  5738. params: QueryParamTypes | None = None,
  5739. ) -> None:
  5740. """Delete a cron.
  5741. Args:
  5742. cron_id: The cron ID to delete.
  5743. headers: Optional custom headers to include with the request.
  5744. params: Optional query parameters to include with the request.
  5745. Returns:
  5746. `None`
  5747. ???+ example "Example Usage"
  5748. ```python
  5749. client = get_sync_client(url="http://localhost:8123")
  5750. client.crons.delete(
  5751. cron_id="cron_to_delete"
  5752. )
  5753. ```
  5754. """
  5755. self.http.delete(f"/runs/crons/{cron_id}", headers=headers, params=params)
  5756. def search(
  5757. self,
  5758. *,
  5759. assistant_id: str | None = None,
  5760. thread_id: str | None = None,
  5761. limit: int = 10,
  5762. offset: int = 0,
  5763. sort_by: CronSortBy | None = None,
  5764. sort_order: SortOrder | None = None,
  5765. select: list[CronSelectField] | None = None,
  5766. headers: Mapping[str, str] | None = None,
  5767. params: QueryParamTypes | None = None,
  5768. ) -> list[Cron]:
  5769. """Get a list of cron jobs.
  5770. Args:
  5771. assistant_id: The assistant ID or graph name to search for.
  5772. thread_id: the thread ID to search for.
  5773. limit: The maximum number of results to return.
  5774. offset: The number of results to skip.
  5775. headers: Optional custom headers to include with the request.
  5776. Returns:
  5777. The list of cron jobs returned by the search,
  5778. ???+ example "Example Usage"
  5779. ```python
  5780. client = get_sync_client(url="http://localhost:8123")
  5781. cron_jobs = client.crons.search(
  5782. assistant_id="my_assistant_id",
  5783. thread_id="my_thread_id",
  5784. limit=5,
  5785. offset=5,
  5786. )
  5787. print(cron_jobs)
  5788. ```
  5789. ```shell
  5790. ----------------------------------------------------------
  5791. [
  5792. {
  5793. 'cron_id': '1ef3cefa-4c09-6926-96d0-3dc97fd5e39b',
  5794. 'assistant_id': 'my_assistant_id',
  5795. 'thread_id': 'my_thread_id',
  5796. 'user_id': None,
  5797. 'payload':
  5798. {
  5799. 'input': {'start_time': ''},
  5800. 'schedule': '4 * * * *',
  5801. 'assistant_id': 'my_assistant_id'
  5802. },
  5803. 'schedule': '4 * * * *',
  5804. 'next_run_date': '2024-07-25T17:04:00+00:00',
  5805. 'end_time': None,
  5806. 'created_at': '2024-07-08T06:02:23.073257+00:00',
  5807. 'updated_at': '2024-07-08T06:02:23.073257+00:00'
  5808. }
  5809. ]
  5810. ```
  5811. """
  5812. payload = {
  5813. "assistant_id": assistant_id,
  5814. "thread_id": thread_id,
  5815. "limit": limit,
  5816. "offset": offset,
  5817. }
  5818. if sort_by:
  5819. payload["sort_by"] = sort_by
  5820. if sort_order:
  5821. payload["sort_order"] = sort_order
  5822. if select:
  5823. payload["select"] = select
  5824. payload = {k: v for k, v in payload.items() if v is not None}
  5825. return self.http.post(
  5826. "/runs/crons/search", json=payload, headers=headers, params=params
  5827. )
  5828. def count(
  5829. self,
  5830. *,
  5831. assistant_id: str | None = None,
  5832. thread_id: str | None = None,
  5833. headers: Mapping[str, str] | None = None,
  5834. params: QueryParamTypes | None = None,
  5835. ) -> int:
  5836. """Count cron jobs matching filters.
  5837. Args:
  5838. assistant_id: Assistant ID to filter by.
  5839. thread_id: Thread ID to filter by.
  5840. headers: Optional custom headers to include with the request.
  5841. params: Optional query parameters to include with the request.
  5842. Returns:
  5843. int: Number of crons matching the criteria.
  5844. """
  5845. payload: dict[str, Any] = {}
  5846. if assistant_id:
  5847. payload["assistant_id"] = assistant_id
  5848. if thread_id:
  5849. payload["thread_id"] = thread_id
  5850. return self.http.post(
  5851. "/runs/crons/count", json=payload, headers=headers, params=params
  5852. )
  5853. class SyncStoreClient:
  5854. """A client for synchronous operations on a key-value store.
  5855. Provides methods to interact with a remote key-value store, allowing
  5856. storage and retrieval of items within namespaced hierarchies.
  5857. ???+ example "Example"
  5858. ```python
  5859. client = get_sync_client(url="http://localhost:2024"))
  5860. client.store.put_item(["users", "profiles"], "user123", {"name": "Alice", "age": 30})
  5861. ```
  5862. """
  5863. def __init__(self, http: SyncHttpClient) -> None:
  5864. self.http = http
  5865. def put_item(
  5866. self,
  5867. namespace: Sequence[str],
  5868. /,
  5869. key: str,
  5870. value: Mapping[str, Any],
  5871. index: Literal[False] | list[str] | None = None,
  5872. ttl: int | None = None,
  5873. headers: Mapping[str, str] | None = None,
  5874. params: QueryParamTypes | None = None,
  5875. ) -> None:
  5876. """Store or update an item.
  5877. Args:
  5878. namespace: A list of strings representing the namespace path.
  5879. key: The unique identifier for the item within the namespace.
  5880. value: A dictionary containing the item's data.
  5881. index: Controls search indexing - None (use defaults), False (disable), or list of field paths to index.
  5882. ttl: Optional time-to-live in minutes for the item, or None for no expiration.
  5883. headers: Optional custom headers to include with the request.
  5884. params: Optional query parameters to include with the request.
  5885. Returns:
  5886. `None`
  5887. ???+ example "Example Usage"
  5888. ```python
  5889. client = get_sync_client(url="http://localhost:8123")
  5890. client.store.put_item(
  5891. ["documents", "user123"],
  5892. key="item456",
  5893. value={"title": "My Document", "content": "Hello World"}
  5894. )
  5895. ```
  5896. """
  5897. for label in namespace:
  5898. if "." in label:
  5899. raise ValueError(
  5900. f"Invalid namespace label '{label}'. Namespace labels cannot contain periods ('.')."
  5901. )
  5902. payload = {
  5903. "namespace": namespace,
  5904. "key": key,
  5905. "value": value,
  5906. "index": index,
  5907. "ttl": ttl,
  5908. }
  5909. self.http.put(
  5910. "/store/items", json=_provided_vals(payload), headers=headers, params=params
  5911. )
  5912. def get_item(
  5913. self,
  5914. namespace: Sequence[str],
  5915. /,
  5916. key: str,
  5917. *,
  5918. refresh_ttl: bool | None = None,
  5919. headers: Mapping[str, str] | None = None,
  5920. params: QueryParamTypes | None = None,
  5921. ) -> Item:
  5922. """Retrieve a single item.
  5923. Args:
  5924. key: The unique identifier for the item.
  5925. namespace: Optional list of strings representing the namespace path.
  5926. refresh_ttl: Whether to refresh the TTL on this read operation. If `None`, uses the store's default behavior.
  5927. headers: Optional custom headers to include with the request.
  5928. Returns:
  5929. The retrieved item.
  5930. ???+ example "Example Usage"
  5931. ```python
  5932. client = get_sync_client(url="http://localhost:8123")
  5933. item = client.store.get_item(
  5934. ["documents", "user123"],
  5935. key="item456",
  5936. )
  5937. print(item)
  5938. ```
  5939. ```shell
  5940. ----------------------------------------------------------------
  5941. {
  5942. 'namespace': ['documents', 'user123'],
  5943. 'key': 'item456',
  5944. 'value': {'title': 'My Document', 'content': 'Hello World'},
  5945. 'created_at': '2024-07-30T12:00:00Z',
  5946. 'updated_at': '2024-07-30T12:00:00Z'
  5947. }
  5948. ```
  5949. """
  5950. for label in namespace:
  5951. if "." in label:
  5952. raise ValueError(
  5953. f"Invalid namespace label '{label}'. Namespace labels cannot contain periods ('.')."
  5954. )
  5955. query_params = {"key": key, "namespace": ".".join(namespace)}
  5956. if refresh_ttl is not None:
  5957. query_params["refresh_ttl"] = refresh_ttl
  5958. if params:
  5959. query_params.update(params)
  5960. return self.http.get("/store/items", params=query_params, headers=headers)
  5961. def delete_item(
  5962. self,
  5963. namespace: Sequence[str],
  5964. /,
  5965. key: str,
  5966. headers: Mapping[str, str] | None = None,
  5967. params: QueryParamTypes | None = None,
  5968. ) -> None:
  5969. """Delete an item.
  5970. Args:
  5971. key: The unique identifier for the item.
  5972. namespace: Optional list of strings representing the namespace path.
  5973. headers: Optional custom headers to include with the request.
  5974. params: Optional query parameters to include with the request.
  5975. Returns:
  5976. `None`
  5977. ???+ example "Example Usage"
  5978. ```python
  5979. client = get_sync_client(url="http://localhost:8123")
  5980. client.store.delete_item(
  5981. ["documents", "user123"],
  5982. key="item456",
  5983. )
  5984. ```
  5985. """
  5986. self.http.delete(
  5987. "/store/items",
  5988. json={"key": key, "namespace": namespace},
  5989. headers=headers,
  5990. params=params,
  5991. )
  5992. def search_items(
  5993. self,
  5994. namespace_prefix: Sequence[str],
  5995. /,
  5996. filter: Mapping[str, Any] | None = None,
  5997. limit: int = 10,
  5998. offset: int = 0,
  5999. query: str | None = None,
  6000. refresh_ttl: bool | None = None,
  6001. headers: Mapping[str, str] | None = None,
  6002. params: QueryParamTypes | None = None,
  6003. ) -> SearchItemsResponse:
  6004. """Search for items within a namespace prefix.
  6005. Args:
  6006. namespace_prefix: List of strings representing the namespace prefix.
  6007. filter: Optional dictionary of key-value pairs to filter results.
  6008. limit: Maximum number of items to return (default is 10).
  6009. offset: Number of items to skip before returning results (default is 0).
  6010. query: Optional query for natural language search.
  6011. refresh_ttl: Whether to refresh the TTL on items returned by this search. If `None`, uses the store's default behavior.
  6012. headers: Optional custom headers to include with the request.
  6013. params: Optional query parameters to include with the request.
  6014. Returns:
  6015. A list of items matching the search criteria.
  6016. ???+ example "Example Usage"
  6017. ```python
  6018. client = get_sync_client(url="http://localhost:8123")
  6019. items = client.store.search_items(
  6020. ["documents"],
  6021. filter={"author": "John Doe"},
  6022. limit=5,
  6023. offset=0
  6024. )
  6025. print(items)
  6026. ```
  6027. ```shell
  6028. ----------------------------------------------------------------
  6029. {
  6030. "items": [
  6031. {
  6032. "namespace": ["documents", "user123"],
  6033. "key": "item789",
  6034. "value": {
  6035. "title": "Another Document",
  6036. "author": "John Doe"
  6037. },
  6038. "created_at": "2024-07-30T12:00:00Z",
  6039. "updated_at": "2024-07-30T12:00:00Z"
  6040. },
  6041. # ... additional items ...
  6042. ]
  6043. }
  6044. ```
  6045. """
  6046. payload = {
  6047. "namespace_prefix": namespace_prefix,
  6048. "filter": filter,
  6049. "limit": limit,
  6050. "offset": offset,
  6051. "query": query,
  6052. "refresh_ttl": refresh_ttl,
  6053. }
  6054. return self.http.post(
  6055. "/store/items/search",
  6056. json=_provided_vals(payload),
  6057. headers=headers,
  6058. params=params,
  6059. )
  6060. def list_namespaces(
  6061. self,
  6062. prefix: list[str] | None = None,
  6063. suffix: list[str] | None = None,
  6064. max_depth: int | None = None,
  6065. limit: int = 100,
  6066. offset: int = 0,
  6067. *,
  6068. headers: Mapping[str, str] | None = None,
  6069. params: QueryParamTypes | None = None,
  6070. ) -> ListNamespaceResponse:
  6071. """List namespaces with optional match conditions.
  6072. Args:
  6073. prefix: Optional list of strings representing the prefix to filter namespaces.
  6074. suffix: Optional list of strings representing the suffix to filter namespaces.
  6075. max_depth: Optional integer specifying the maximum depth of namespaces to return.
  6076. limit: Maximum number of namespaces to return (default is 100).
  6077. offset: Number of namespaces to skip before returning results (default is 0).
  6078. headers: Optional custom headers to include with the request.
  6079. Returns:
  6080. A list of namespaces matching the criteria.
  6081. ???+ example "Example Usage"
  6082. ```python
  6083. client = get_sync_client(url="http://localhost:8123")
  6084. namespaces = client.store.list_namespaces(
  6085. prefix=["documents"],
  6086. max_depth=3,
  6087. limit=10,
  6088. offset=0
  6089. )
  6090. print(namespaces)
  6091. ```
  6092. ```shell
  6093. ----------------------------------------------------------------
  6094. [
  6095. ["documents", "user123", "reports"],
  6096. ["documents", "user456", "invoices"],
  6097. ...
  6098. ]
  6099. ```
  6100. """
  6101. payload = {
  6102. "prefix": prefix,
  6103. "suffix": suffix,
  6104. "max_depth": max_depth,
  6105. "limit": limit,
  6106. "offset": offset,
  6107. }
  6108. return self.http.post(
  6109. "/store/namespaces",
  6110. json=_provided_vals(payload),
  6111. headers=headers,
  6112. params=params,
  6113. )
  6114. def _provided_vals(d: Mapping[str, Any]) -> dict[str, Any]:
  6115. return {k: v for k, v in d.items() if v is not None}
  6116. _registered_transports: list[httpx.ASGITransport] = []
  6117. # Do not move; this is used in the server.
  6118. def configure_loopback_transports(app: Any) -> None:
  6119. for transport in _registered_transports:
  6120. transport.app = app
  6121. @functools.lru_cache(maxsize=1)
  6122. def get_asgi_transport() -> type[httpx.ASGITransport]:
  6123. try:
  6124. from langgraph_api import asgi_transport # type: ignore[unresolved-import]
  6125. return asgi_transport.ASGITransport
  6126. except ImportError:
  6127. # Older versions of the server
  6128. return httpx.ASGITransport
  6129. TimeoutTypes = (
  6130. None
  6131. | float
  6132. | tuple[float | None, float | None]
  6133. | tuple[float | None, float | None, float | None, float | None]
  6134. | httpx.Timeout
  6135. )