  1. # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
  2. from __future__ import annotations
  3. from typing import Union, Mapping, Optional, cast
  4. from typing_extensions import Literal, overload
  5. import httpx
  6. from .. import _legacy_response
  7. from ..types import image_edit_params, image_generate_params, image_create_variation_params
  8. from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, SequenceNotStr, omit, not_given
  9. from .._utils import extract_files, required_args, maybe_transform, deepcopy_minimal, async_maybe_transform
  10. from .._compat import cached_property
  11. from .._resource import SyncAPIResource, AsyncAPIResource
  12. from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
  13. from .._streaming import Stream, AsyncStream
  14. from .._base_client import make_request_options
  15. from ..types.image_model import ImageModel
  16. from ..types.images_response import ImagesResponse
  17. from ..types.image_gen_stream_event import ImageGenStreamEvent
  18. from ..types.image_edit_stream_event import ImageEditStreamEvent
  19. __all__ = ["Images", "AsyncImages"]
  20. class Images(SyncAPIResource):
  21. @cached_property
  22. def with_raw_response(self) -> ImagesWithRawResponse:
  23. """
  24. This property can be used as a prefix for any HTTP method call to return
  25. the raw response object instead of the parsed content.
  26. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
  27. """
  28. return ImagesWithRawResponse(self)
  29. @cached_property
  30. def with_streaming_response(self) -> ImagesWithStreamingResponse:
  31. """
  32. An alternative to `.with_raw_response` that doesn't eagerly read the response body.
  33. For more information, see https://www.github.com/openai/openai-python#with_streaming_response
  34. """
  35. return ImagesWithStreamingResponse(self)
  36. def create_variation(
  37. self,
  38. *,
  39. image: FileTypes,
  40. model: Union[str, ImageModel, None] | Omit = omit,
  41. n: Optional[int] | Omit = omit,
  42. response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
  43. size: Optional[Literal["256x256", "512x512", "1024x1024"]] | Omit = omit,
  44. user: str | Omit = omit,
  45. # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
  46. # The extra values given here take precedence over values defined on the client or passed to this method.
  47. extra_headers: Headers | None = None,
  48. extra_query: Query | None = None,
  49. extra_body: Body | None = None,
  50. timeout: float | httpx.Timeout | None | NotGiven = not_given,
  51. ) -> ImagesResponse:
  52. """Creates a variation of a given image.
  53. This endpoint only supports `dall-e-2`.
  54. Args:
  55. image: The image to use as the basis for the variation(s). Must be a valid PNG file,
  56. less than 4MB, and square.
  57. model: The model to use for image generation. Only `dall-e-2` is supported at this
  58. time.
  59. n: The number of images to generate. Must be between 1 and 10.
  60. response_format: The format in which the generated images are returned. Must be one of `url` or
  61. `b64_json`. URLs are only valid for 60 minutes after the image has been
  62. generated.
  63. size: The size of the generated images. Must be one of `256x256`, `512x512`, or
  64. `1024x1024`.
  65. user: A unique identifier representing your end-user, which can help OpenAI to monitor
  66. and detect abuse.
  67. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
  68. extra_headers: Send extra headers
  69. extra_query: Add additional query parameters to the request
  70. extra_body: Add additional JSON properties to the request
  71. timeout: Override the client-level default timeout for this request, in seconds
  72. """
  73. body = deepcopy_minimal(
  74. {
  75. "image": image,
  76. "model": model,
  77. "n": n,
  78. "response_format": response_format,
  79. "size": size,
  80. "user": user,
  81. }
  82. )
  83. files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
  84. # It should be noted that the actual Content-Type header that will be
  85. # sent to the server will contain a `boundary` parameter, e.g.
  86. # multipart/form-data; boundary=---abc--
  87. extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
  88. return self._post(
  89. "/images/variations",
  90. body=maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
  91. files=files,
  92. options=make_request_options(
  93. extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
  94. ),
  95. cast_to=ImagesResponse,
  96. )
    @overload
    def edit(
        self,
        *,
        image: Union[FileTypes, SequenceNotStr[FileTypes]],
        prompt: str,
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
        mask: FileTypes | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
        stream: Optional[Literal[False]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ImagesResponse:
        """Creates an edited or extended image given one or more source images and a
        prompt.

        This endpoint only supports `gpt-image-1` and `dall-e-2`.

        Args:
          image: The image(s) to edit. Must be a supported image file or an array of images.

              For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
              50MB. You can provide up to 16 images.

              For `dall-e-2`, you can only provide one image, and it should be a square `png`
              file less than 4MB.

          prompt: A text description of the desired image(s). The maximum length is 1000
              characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.

          background: Allows to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          input_fidelity: Control how much effort the model will exert to match the style and features,
              especially facial features, of input images. This parameter is only supported
              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
              `low`. Defaults to `low`.

          mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
              indicate where `image` should be edited. If there are multiple images provided,
              the mask will be applied on the first image. Must be a valid PNG file, less than
              4MB, and have the same dimensions as `image`.

          model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
              supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
              is used.

          n: The number of images to generate. Must be between 1 and 10.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
              default value is `png`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              are generated if the full image is generated more quickly.

          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
              Defaults to `auto`.

          response_format: The format in which the generated images are returned. Must be one of `url` or
              `b64_json`. URLs are only valid for 60 minutes after the image has been
              generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
              will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.

          stream: Edit the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
    @overload
    def edit(
        self,
        *,
        image: Union[FileTypes, SequenceNotStr[FileTypes]],
        prompt: str,
        stream: Literal[True],
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
        mask: FileTypes | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Stream[ImageEditStreamEvent]:
        """Creates an edited or extended image given one or more source images and a
        prompt.

        This endpoint only supports `gpt-image-1` and `dall-e-2`.

        Args:
          image: The image(s) to edit. Must be a supported image file or an array of images.

              For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
              50MB. You can provide up to 16 images.

              For `dall-e-2`, you can only provide one image, and it should be a square `png`
              file less than 4MB.

          prompt: A text description of the desired image(s). The maximum length is 1000
              characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.

          stream: Edit the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information.

          background: Allows to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          input_fidelity: Control how much effort the model will exert to match the style and features,
              especially facial features, of input images. This parameter is only supported
              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
              `low`. Defaults to `low`.

          mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
              indicate where `image` should be edited. If there are multiple images provided,
              the mask will be applied on the first image. Must be a valid PNG file, less than
              4MB, and have the same dimensions as `image`.

          model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
              supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
              is used.

          n: The number of images to generate. Must be between 1 and 10.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
              default value is `png`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              are generated if the full image is generated more quickly.

          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
              Defaults to `auto`.

          response_format: The format in which the generated images are returned. Must be one of `url` or
              `b64_json`. URLs are only valid for 60 minutes after the image has been
              generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
              will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
    @overload
    def edit(
        self,
        *,
        image: Union[FileTypes, SequenceNotStr[FileTypes]],
        prompt: str,
        stream: bool,
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
        mask: FileTypes | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ImagesResponse | Stream[ImageEditStreamEvent]:
        """Creates an edited or extended image given one or more source images and a
        prompt.

        This endpoint only supports `gpt-image-1` and `dall-e-2`.

        Args:
          image: The image(s) to edit. Must be a supported image file or an array of images.

              For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
              50MB. You can provide up to 16 images.

              For `dall-e-2`, you can only provide one image, and it should be a square `png`
              file less than 4MB.

          prompt: A text description of the desired image(s). The maximum length is 1000
              characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.

          stream: Edit the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information.

          background: Allows to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          input_fidelity: Control how much effort the model will exert to match the style and features,
              especially facial features, of input images. This parameter is only supported
              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
              `low`. Defaults to `low`.

          mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
              indicate where `image` should be edited. If there are multiple images provided,
              the mask will be applied on the first image. Must be a valid PNG file, less than
              4MB, and have the same dimensions as `image`.

          model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
              supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
              is used.

          n: The number of images to generate. Must be between 1 and 10.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
              default value is `png`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              are generated if the full image is generated more quickly.

          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
              Defaults to `auto`.

          response_format: The format in which the generated images are returned. Must be one of `url` or
              `b64_json`. URLs are only valid for 60 minutes after the image has been
              generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
              will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
  361. @required_args(["image", "prompt"], ["image", "prompt", "stream"])
  362. def edit(
  363. self,
  364. *,
  365. image: Union[FileTypes, SequenceNotStr[FileTypes]],
  366. prompt: str,
  367. background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
  368. input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
  369. mask: FileTypes | Omit = omit,
  370. model: Union[str, ImageModel, None] | Omit = omit,
  371. n: Optional[int] | Omit = omit,
  372. output_compression: Optional[int] | Omit = omit,
  373. output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
  374. partial_images: Optional[int] | Omit = omit,
  375. quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
  376. response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
  377. size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
  378. stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
  379. user: str | Omit = omit,
  380. # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
  381. # The extra values given here take precedence over values defined on the client or passed to this method.
  382. extra_headers: Headers | None = None,
  383. extra_query: Query | None = None,
  384. extra_body: Body | None = None,
  385. timeout: float | httpx.Timeout | None | NotGiven = not_given,
  386. ) -> ImagesResponse | Stream[ImageEditStreamEvent]:
  387. body = deepcopy_minimal(
  388. {
  389. "image": image,
  390. "prompt": prompt,
  391. "background": background,
  392. "input_fidelity": input_fidelity,
  393. "mask": mask,
  394. "model": model,
  395. "n": n,
  396. "output_compression": output_compression,
  397. "output_format": output_format,
  398. "partial_images": partial_images,
  399. "quality": quality,
  400. "response_format": response_format,
  401. "size": size,
  402. "stream": stream,
  403. "user": user,
  404. }
  405. )
  406. files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", "<array>"], ["mask"]])
  407. # It should be noted that the actual Content-Type header that will be
  408. # sent to the server will contain a `boundary` parameter, e.g.
  409. # multipart/form-data; boundary=---abc--
  410. extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
  411. return self._post(
  412. "/images/edits",
  413. body=maybe_transform(
  414. body,
  415. image_edit_params.ImageEditParamsStreaming if stream else image_edit_params.ImageEditParamsNonStreaming,
  416. ),
  417. files=files,
  418. options=make_request_options(
  419. extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
  420. ),
  421. cast_to=ImagesResponse,
  422. stream=stream or False,
  423. stream_cls=Stream[ImageEditStreamEvent],
  424. )
    @overload
    # Overload: `stream` omitted or explicitly False — performs a blocking request
    # and returns the fully parsed ImagesResponse.
    def generate(
        self,
        *,
        prompt: str,
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        moderation: Optional[Literal["low", "auto"]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[
            Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
        ]
        | Omit = omit,
        stream: Optional[Literal[False]] | Omit = omit,
        style: Optional[Literal["vivid", "natural"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ImagesResponse:
        """
        Creates an image given a prompt.
        [Learn more](https://platform.openai.com/docs/guides/images).

        Args:
          prompt: A text description of the desired image(s). The maximum length is 32000
              characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
              for `dall-e-3`.

          background: Allows to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
              `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
              `gpt-image-1` is used.

          moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
              be either `low` for less restrictive filtering or `auto` (default value).

          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
              `n=1` is supported.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              are generated if the full image is generated more quickly.

          quality: The quality of the image that will be generated.

              - `auto` (default value) will automatically select the best quality for the
                given model.
              - `high`, `medium` and `low` are supported for `gpt-image-1`.
              - `hd` and `standard` are supported for `dall-e-3`.
              - `standard` is the only option for `dall-e-2`.

          response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
              returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
              after the image has been generated. This parameter isn't supported for
              `gpt-image-1` which will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
              one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.

          stream: Generate the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information. This parameter is only supported for `gpt-image-1`.

          style: The style of the generated images. This parameter is only supported for
              `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
              towards generating hyper-real and dramatic images. Natural causes the model to
              produce more natural, less hyper-real looking images.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
    @overload
    # Overload: `stream=True` is required — the request is made in streaming mode
    # and a Stream of ImageGenStreamEvent items is returned instead of a parsed response.
    def generate(
        self,
        *,
        prompt: str,
        stream: Literal[True],
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        moderation: Optional[Literal["low", "auto"]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[
            Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
        ]
        | Omit = omit,
        style: Optional[Literal["vivid", "natural"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Stream[ImageGenStreamEvent]:
        """
        Creates an image given a prompt.
        [Learn more](https://platform.openai.com/docs/guides/images).

        Args:
          prompt: A text description of the desired image(s). The maximum length is 32000
              characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
              for `dall-e-3`.

          stream: Generate the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information. This parameter is only supported for `gpt-image-1`.

          background: Allows to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
              `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
              `gpt-image-1` is used.

          moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
              be either `low` for less restrictive filtering or `auto` (default value).

          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
              `n=1` is supported.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              are generated if the full image is generated more quickly.

          quality: The quality of the image that will be generated.

              - `auto` (default value) will automatically select the best quality for the
                given model.
              - `high`, `medium` and `low` are supported for `gpt-image-1`.
              - `hd` and `standard` are supported for `dall-e-3`.
              - `standard` is the only option for `dall-e-2`.

          response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
              returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
              after the image has been generated. This parameter isn't supported for
              `gpt-image-1` which will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
              one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.

          style: The style of the generated images. This parameter is only supported for
              `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
              towards generating hyper-real and dramatic images. Natural causes the model to
              produce more natural, less hyper-real looking images.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
    @overload
    # Overload: `stream: bool` is required but not statically known — the result is the
    # union ImagesResponse | Stream[ImageGenStreamEvent], resolved at runtime.
    def generate(
        self,
        *,
        prompt: str,
        stream: bool,
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        moderation: Optional[Literal["low", "auto"]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[
            Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
        ]
        | Omit = omit,
        style: Optional[Literal["vivid", "natural"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ImagesResponse | Stream[ImageGenStreamEvent]:
        """
        Creates an image given a prompt.
        [Learn more](https://platform.openai.com/docs/guides/images).

        Args:
          prompt: A text description of the desired image(s). The maximum length is 32000
              characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
              for `dall-e-3`.

          stream: Generate the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information. This parameter is only supported for `gpt-image-1`.

          background: Allows to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
              `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
              `gpt-image-1` is used.

          moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
              be either `low` for less restrictive filtering or `auto` (default value).

          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
              `n=1` is supported.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              are generated if the full image is generated more quickly.

          quality: The quality of the image that will be generated.

              - `auto` (default value) will automatically select the best quality for the
                given model.
              - `high`, `medium` and `low` are supported for `gpt-image-1`.
              - `hd` and `standard` are supported for `dall-e-3`.
              - `standard` is the only option for `dall-e-2`.

          response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
              returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
              after the image has been generated. This parameter isn't supported for
              `gpt-image-1` which will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
              one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.

          style: The style of the generated images. This parameter is only supported for
              `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
              towards generating hyper-real and dramatic images. Natural causes the model to
              produce more natural, less hyper-real looking images.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
  689. @required_args(["prompt"], ["prompt", "stream"])
  690. def generate(
  691. self,
  692. *,
  693. prompt: str,
  694. background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
  695. model: Union[str, ImageModel, None] | Omit = omit,
  696. moderation: Optional[Literal["low", "auto"]] | Omit = omit,
  697. n: Optional[int] | Omit = omit,
  698. output_compression: Optional[int] | Omit = omit,
  699. output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
  700. partial_images: Optional[int] | Omit = omit,
  701. quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
  702. response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
  703. size: Optional[
  704. Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
  705. ]
  706. | Omit = omit,
  707. stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
  708. style: Optional[Literal["vivid", "natural"]] | Omit = omit,
  709. user: str | Omit = omit,
  710. # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
  711. # The extra values given here take precedence over values defined on the client or passed to this method.
  712. extra_headers: Headers | None = None,
  713. extra_query: Query | None = None,
  714. extra_body: Body | None = None,
  715. timeout: float | httpx.Timeout | None | NotGiven = not_given,
  716. ) -> ImagesResponse | Stream[ImageGenStreamEvent]:
  717. return self._post(
  718. "/images/generations",
  719. body=maybe_transform(
  720. {
  721. "prompt": prompt,
  722. "background": background,
  723. "model": model,
  724. "moderation": moderation,
  725. "n": n,
  726. "output_compression": output_compression,
  727. "output_format": output_format,
  728. "partial_images": partial_images,
  729. "quality": quality,
  730. "response_format": response_format,
  731. "size": size,
  732. "stream": stream,
  733. "style": style,
  734. "user": user,
  735. },
  736. image_generate_params.ImageGenerateParamsStreaming
  737. if stream
  738. else image_generate_params.ImageGenerateParamsNonStreaming,
  739. ),
  740. options=make_request_options(
  741. extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
  742. ),
  743. cast_to=ImagesResponse,
  744. stream=stream or False,
  745. stream_cls=Stream[ImageGenStreamEvent],
  746. )
  747. class AsyncImages(AsyncAPIResource):
    @cached_property
    # Built lazily on first access and then memoized on the instance by @cached_property.
    def with_raw_response(self) -> AsyncImagesWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncImagesWithRawResponse(self)
    @cached_property
    # Built lazily on first access and then memoized on the instance by @cached_property.
    def with_streaming_response(self) -> AsyncImagesWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncImagesWithStreamingResponse(self)
  763. async def create_variation(
  764. self,
  765. *,
  766. image: FileTypes,
  767. model: Union[str, ImageModel, None] | Omit = omit,
  768. n: Optional[int] | Omit = omit,
  769. response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
  770. size: Optional[Literal["256x256", "512x512", "1024x1024"]] | Omit = omit,
  771. user: str | Omit = omit,
  772. # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
  773. # The extra values given here take precedence over values defined on the client or passed to this method.
  774. extra_headers: Headers | None = None,
  775. extra_query: Query | None = None,
  776. extra_body: Body | None = None,
  777. timeout: float | httpx.Timeout | None | NotGiven = not_given,
  778. ) -> ImagesResponse:
  779. """Creates a variation of a given image.
  780. This endpoint only supports `dall-e-2`.
  781. Args:
  782. image: The image to use as the basis for the variation(s). Must be a valid PNG file,
  783. less than 4MB, and square.
  784. model: The model to use for image generation. Only `dall-e-2` is supported at this
  785. time.
  786. n: The number of images to generate. Must be between 1 and 10.
  787. response_format: The format in which the generated images are returned. Must be one of `url` or
  788. `b64_json`. URLs are only valid for 60 minutes after the image has been
  789. generated.
  790. size: The size of the generated images. Must be one of `256x256`, `512x512`, or
  791. `1024x1024`.
  792. user: A unique identifier representing your end-user, which can help OpenAI to monitor
  793. and detect abuse.
  794. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
  795. extra_headers: Send extra headers
  796. extra_query: Add additional query parameters to the request
  797. extra_body: Add additional JSON properties to the request
  798. timeout: Override the client-level default timeout for this request, in seconds
  799. """
  800. body = deepcopy_minimal(
  801. {
  802. "image": image,
  803. "model": model,
  804. "n": n,
  805. "response_format": response_format,
  806. "size": size,
  807. "user": user,
  808. }
  809. )
  810. files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
  811. # It should be noted that the actual Content-Type header that will be
  812. # sent to the server will contain a `boundary` parameter, e.g.
  813. # multipart/form-data; boundary=---abc--
  814. extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
  815. return await self._post(
  816. "/images/variations",
  817. body=await async_maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
  818. files=files,
  819. options=make_request_options(
  820. extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
  821. ),
  822. cast_to=ImagesResponse,
  823. )
    @overload
    # Overload: `stream` omitted or False — performs a blocking edit request and
    # returns the fully parsed ImagesResponse.
    async def edit(
        self,
        *,
        image: Union[FileTypes, SequenceNotStr[FileTypes]],
        prompt: str,
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
        mask: FileTypes | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
        stream: Optional[Literal[False]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ImagesResponse:
        """Creates an edited or extended image given one or more source images and a
        prompt.

        This endpoint only supports `gpt-image-1` and `dall-e-2`.

        Args:
          image: The image(s) to edit. Must be a supported image file or an array of images.

              For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
              50MB. You can provide up to 16 images.

              For `dall-e-2`, you can only provide one image, and it should be a square `png`
              file less than 4MB.

          prompt: A text description of the desired image(s). The maximum length is 1000
              characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.

          background: Allows to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          input_fidelity: Control how much effort the model will exert to match the style and features,
              especially facial features, of input images. This parameter is only supported
              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
              `low`. Defaults to `low`.

          mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
              indicate where `image` should be edited. If there are multiple images provided,
              the mask will be applied on the first image. Must be a valid PNG file, less than
              4MB, and have the same dimensions as `image`.

          model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
              supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
              is used.

          n: The number of images to generate. Must be between 1 and 10.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
              default value is `png`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              are generated if the full image is generated more quickly.

          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
              Defaults to `auto`.

          response_format: The format in which the generated images are returned. Must be one of `url` or
              `b64_json`. URLs are only valid for 60 minutes after the image has been
              generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
              will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.

          stream: Edit the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
  912. @overload
  913. async def edit(
  914. self,
  915. *,
  916. image: Union[FileTypes, SequenceNotStr[FileTypes]],
  917. prompt: str,
  918. stream: Literal[True],
  919. background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
  920. input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
  921. mask: FileTypes | Omit = omit,
  922. model: Union[str, ImageModel, None] | Omit = omit,
  923. n: Optional[int] | Omit = omit,
  924. output_compression: Optional[int] | Omit = omit,
  925. output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
  926. partial_images: Optional[int] | Omit = omit,
  927. quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
  928. response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
  929. size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
  930. user: str | Omit = omit,
  931. # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
  932. # The extra values given here take precedence over values defined on the client or passed to this method.
  933. extra_headers: Headers | None = None,
  934. extra_query: Query | None = None,
  935. extra_body: Body | None = None,
  936. timeout: float | httpx.Timeout | None | NotGiven = not_given,
  937. ) -> AsyncStream[ImageEditStreamEvent]:
  938. """Creates an edited or extended image given one or more source images and a
  939. prompt.
  940. This endpoint only supports `gpt-image-1` and `dall-e-2`.
  941. Args:
  942. image: The image(s) to edit. Must be a supported image file or an array of images.
  943. For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
  944. 50MB. You can provide up to 16 images.
  945. For `dall-e-2`, you can only provide one image, and it should be a square `png`
  946. file less than 4MB.
  947. prompt: A text description of the desired image(s). The maximum length is 1000
  948. characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
  949. stream: Edit the image in streaming mode. Defaults to `false`. See the
  950. [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
  951. for more information.
  952. background: Allows to set transparency for the background of the generated image(s). This
  953. parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
  954. `opaque` or `auto` (default value). When `auto` is used, the model will
  955. automatically determine the best background for the image.
  956. If `transparent`, the output format needs to support transparency, so it should
  957. be set to either `png` (default value) or `webp`.
  958. input_fidelity: Control how much effort the model will exert to match the style and features,
  959. especially facial features, of input images. This parameter is only supported
  960. for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
  961. `low`. Defaults to `low`.
  962. mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
  963. indicate where `image` should be edited. If there are multiple images provided,
  964. the mask will be applied on the first image. Must be a valid PNG file, less than
  965. 4MB, and have the same dimensions as `image`.
  966. model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
  967. supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
  968. is used.
  969. n: The number of images to generate. Must be between 1 and 10.
  970. output_compression: The compression level (0-100%) for the generated images. This parameter is only
  971. supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
  972. defaults to 100.
  973. output_format: The format in which the generated images are returned. This parameter is only
  974. supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
  975. default value is `png`.
  976. partial_images: The number of partial images to generate. This parameter is used for streaming
  977. responses that return partial images. Value must be between 0 and 3. When set to
  978. 0, the response will be a single image sent in one streaming event.
  979. Note that the final image may be sent before the full number of partial images
  980. are generated if the full image is generated more quickly.
  981. quality: The quality of the image that will be generated. `high`, `medium` and `low` are
  982. only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
  983. Defaults to `auto`.
  984. response_format: The format in which the generated images are returned. Must be one of `url` or
  985. `b64_json`. URLs are only valid for 60 minutes after the image has been
  986. generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
  987. will always return base64-encoded images.
  988. size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
  989. (landscape), `1024x1536` (portrait), or `auto` (default value) for
  990. `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
  991. user: A unique identifier representing your end-user, which can help OpenAI to monitor
  992. and detect abuse.
  993. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
  994. extra_headers: Send extra headers
  995. extra_query: Add additional query parameters to the request
  996. extra_body: Add additional JSON properties to the request
  997. timeout: Override the client-level default timeout for this request, in seconds
  998. """
  999. ...
@overload
async def edit(
    self,
    *,
    image: Union[FileTypes, SequenceNotStr[FileTypes]],
    prompt: str,
    stream: bool,
    background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
    input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
    mask: FileTypes | Omit = omit,
    model: Union[str, ImageModel, None] | Omit = omit,
    n: Optional[int] | Omit = omit,
    output_compression: Optional[int] | Omit = omit,
    output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
    partial_images: Optional[int] | Omit = omit,
    quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
    response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
    size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
    user: str | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ImagesResponse | AsyncStream[ImageEditStreamEvent]:
    """Creates an edited or extended image given one or more source images and a
    prompt.

    This endpoint only supports `gpt-image-1` and `dall-e-2`.

    Args:
      image: The image(s) to edit. Must be a supported image file or an array of images.

          For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less
          than 50MB. You can provide up to 16 images.

          For `dall-e-2`, you can only provide one image, and it should be a square
          `png` file less than 4MB.

      prompt: A text description of the desired image(s). The maximum length is 1000
          characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.

      stream: Edit the image in streaming mode. Defaults to `false`. See the
          [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
          for more information.

      background: Allows to set transparency for the background of the generated image(s). This
          parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
          `opaque` or `auto` (default value). When `auto` is used, the model will
          automatically determine the best background for the image.

          If `transparent`, the output format needs to support transparency, so it should
          be set to either `png` (default value) or `webp`.

      input_fidelity: Control how much effort the model will exert to match the style and features,
          especially facial features, of input images. This parameter is only supported
          for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
          `low`. Defaults to `low`.

      mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
          indicate where `image` should be edited. If there are multiple images provided,
          the mask will be applied on the first image. Must be a valid PNG file, less than
          4MB, and have the same dimensions as `image`.

      model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
          supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
          is used.

      n: The number of images to generate. Must be between 1 and 10.

      output_compression: The compression level (0-100%) for the generated images. This parameter is only
          supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
          defaults to 100.

      output_format: The format in which the generated images are returned. This parameter is only
          supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
          default value is `png`.

      partial_images: The number of partial images to generate. This parameter is used for streaming
          responses that return partial images. Value must be between 0 and 3. When set to
          0, the response will be a single image sent in one streaming event.

          Note that the final image may be sent before the full number of partial images
          are generated if the full image is generated more quickly.

      quality: The quality of the image that will be generated. `high`, `medium` and `low` are
          only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
          Defaults to `auto`.

      response_format: The format in which the generated images are returned. Must be one of `url` or
          `b64_json`. URLs are only valid for 60 minutes after the image has been
          generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
          will always return base64-encoded images.

      size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
          (landscape), `1024x1536` (portrait), or `auto` (default value) for
          `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.

      user: A unique identifier representing your end-user, which can help OpenAI to monitor
          and detect abuse.
          [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    ...
  1088. @required_args(["image", "prompt"], ["image", "prompt", "stream"])
  1089. async def edit(
  1090. self,
  1091. *,
  1092. image: Union[FileTypes, SequenceNotStr[FileTypes]],
  1093. prompt: str,
  1094. background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
  1095. input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
  1096. mask: FileTypes | Omit = omit,
  1097. model: Union[str, ImageModel, None] | Omit = omit,
  1098. n: Optional[int] | Omit = omit,
  1099. output_compression: Optional[int] | Omit = omit,
  1100. output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
  1101. partial_images: Optional[int] | Omit = omit,
  1102. quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
  1103. response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
  1104. size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
  1105. stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
  1106. user: str | Omit = omit,
  1107. # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
  1108. # The extra values given here take precedence over values defined on the client or passed to this method.
  1109. extra_headers: Headers | None = None,
  1110. extra_query: Query | None = None,
  1111. extra_body: Body | None = None,
  1112. timeout: float | httpx.Timeout | None | NotGiven = not_given,
  1113. ) -> ImagesResponse | AsyncStream[ImageEditStreamEvent]:
  1114. body = deepcopy_minimal(
  1115. {
  1116. "image": image,
  1117. "prompt": prompt,
  1118. "background": background,
  1119. "input_fidelity": input_fidelity,
  1120. "mask": mask,
  1121. "model": model,
  1122. "n": n,
  1123. "output_compression": output_compression,
  1124. "output_format": output_format,
  1125. "partial_images": partial_images,
  1126. "quality": quality,
  1127. "response_format": response_format,
  1128. "size": size,
  1129. "stream": stream,
  1130. "user": user,
  1131. }
  1132. )
  1133. files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", "<array>"], ["mask"]])
  1134. # It should be noted that the actual Content-Type header that will be
  1135. # sent to the server will contain a `boundary` parameter, e.g.
  1136. # multipart/form-data; boundary=---abc--
  1137. extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
  1138. return await self._post(
  1139. "/images/edits",
  1140. body=await async_maybe_transform(
  1141. body,
  1142. image_edit_params.ImageEditParamsStreaming if stream else image_edit_params.ImageEditParamsNonStreaming,
  1143. ),
  1144. files=files,
  1145. options=make_request_options(
  1146. extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
  1147. ),
  1148. cast_to=ImagesResponse,
  1149. stream=stream or False,
  1150. stream_cls=AsyncStream[ImageEditStreamEvent],
  1151. )
@overload
async def generate(
    self,
    *,
    prompt: str,
    background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
    model: Union[str, ImageModel, None] | Omit = omit,
    moderation: Optional[Literal["low", "auto"]] | Omit = omit,
    n: Optional[int] | Omit = omit,
    output_compression: Optional[int] | Omit = omit,
    output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
    partial_images: Optional[int] | Omit = omit,
    quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
    response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
    size: Optional[
        Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
    ]
    | Omit = omit,
    stream: Optional[Literal[False]] | Omit = omit,
    style: Optional[Literal["vivid", "natural"]] | Omit = omit,
    user: str | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ImagesResponse:
    """
    Creates an image given a prompt.
    [Learn more](https://platform.openai.com/docs/guides/images).

    Args:
      prompt: A text description of the desired image(s). The maximum length is 32000
          characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
          for `dall-e-3`.

      background: Allows to set transparency for the background of the generated image(s). This
          parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
          `opaque` or `auto` (default value). When `auto` is used, the model will
          automatically determine the best background for the image.

          If `transparent`, the output format needs to support transparency, so it should
          be set to either `png` (default value) or `webp`.

      model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
          `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
          `gpt-image-1` is used.

      moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
          be either `low` for less restrictive filtering or `auto` (default value).

      n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
          `n=1` is supported.

      output_compression: The compression level (0-100%) for the generated images. This parameter is only
          supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
          defaults to 100.

      output_format: The format in which the generated images are returned. This parameter is only
          supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.

      partial_images: The number of partial images to generate. This parameter is used for streaming
          responses that return partial images. Value must be between 0 and 3. When set to
          0, the response will be a single image sent in one streaming event.

          Note that the final image may be sent before the full number of partial images
          are generated if the full image is generated more quickly.

      quality: The quality of the image that will be generated.

          - `auto` (default value) will automatically select the best quality for the
            given model.
          - `high`, `medium` and `low` are supported for `gpt-image-1`.
          - `hd` and `standard` are supported for `dall-e-3`.
          - `standard` is the only option for `dall-e-2`.

      response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
          returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
          after the image has been generated. This parameter isn't supported for
          `gpt-image-1` which will always return base64-encoded images.

      size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
          (landscape), `1024x1536` (portrait), or `auto` (default value) for
          `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
          one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.

      stream: Generate the image in streaming mode. Defaults to `false`. See the
          [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
          for more information. This parameter is only supported for `gpt-image-1`.

      style: The style of the generated images. This parameter is only supported for
          `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
          towards generating hyper-real and dramatic images. Natural causes the model to
          produce more natural, less hyper-real looking images.

      user: A unique identifier representing your end-user, which can help OpenAI to monitor
          and detect abuse.
          [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    ...
@overload
async def generate(
    self,
    *,
    prompt: str,
    stream: Literal[True],
    background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
    model: Union[str, ImageModel, None] | Omit = omit,
    moderation: Optional[Literal["low", "auto"]] | Omit = omit,
    n: Optional[int] | Omit = omit,
    output_compression: Optional[int] | Omit = omit,
    output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
    partial_images: Optional[int] | Omit = omit,
    quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
    response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
    size: Optional[
        Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
    ]
    | Omit = omit,
    style: Optional[Literal["vivid", "natural"]] | Omit = omit,
    user: str | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncStream[ImageGenStreamEvent]:
    """
    Creates an image given a prompt.
    [Learn more](https://platform.openai.com/docs/guides/images).

    Args:
      prompt: A text description of the desired image(s). The maximum length is 32000
          characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
          for `dall-e-3`.

      stream: Generate the image in streaming mode. Defaults to `false`. See the
          [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
          for more information. This parameter is only supported for `gpt-image-1`.

      background: Allows to set transparency for the background of the generated image(s). This
          parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
          `opaque` or `auto` (default value). When `auto` is used, the model will
          automatically determine the best background for the image.

          If `transparent`, the output format needs to support transparency, so it should
          be set to either `png` (default value) or `webp`.

      model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
          `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
          `gpt-image-1` is used.

      moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
          be either `low` for less restrictive filtering or `auto` (default value).

      n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
          `n=1` is supported.

      output_compression: The compression level (0-100%) for the generated images. This parameter is only
          supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
          defaults to 100.

      output_format: The format in which the generated images are returned. This parameter is only
          supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.

      partial_images: The number of partial images to generate. This parameter is used for streaming
          responses that return partial images. Value must be between 0 and 3. When set to
          0, the response will be a single image sent in one streaming event.

          Note that the final image may be sent before the full number of partial images
          are generated if the full image is generated more quickly.

      quality: The quality of the image that will be generated.

          - `auto` (default value) will automatically select the best quality for the
            given model.
          - `high`, `medium` and `low` are supported for `gpt-image-1`.
          - `hd` and `standard` are supported for `dall-e-3`.
          - `standard` is the only option for `dall-e-2`.

      response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
          returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
          after the image has been generated. This parameter isn't supported for
          `gpt-image-1` which will always return base64-encoded images.

      size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
          (landscape), `1024x1536` (portrait), or `auto` (default value) for
          `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
          one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.

      style: The style of the generated images. This parameter is only supported for
          `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
          towards generating hyper-real and dramatic images. Natural causes the model to
          produce more natural, less hyper-real looking images.

      user: A unique identifier representing your end-user, which can help OpenAI to monitor
          and detect abuse.
          [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    ...
@overload
async def generate(
    self,
    *,
    prompt: str,
    stream: bool,
    background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
    model: Union[str, ImageModel, None] | Omit = omit,
    moderation: Optional[Literal["low", "auto"]] | Omit = omit,
    n: Optional[int] | Omit = omit,
    output_compression: Optional[int] | Omit = omit,
    output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
    partial_images: Optional[int] | Omit = omit,
    quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
    response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
    size: Optional[
        Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
    ]
    | Omit = omit,
    style: Optional[Literal["vivid", "natural"]] | Omit = omit,
    user: str | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ImagesResponse | AsyncStream[ImageGenStreamEvent]:
    """
    Creates an image given a prompt.
    [Learn more](https://platform.openai.com/docs/guides/images).

    Args:
      prompt: A text description of the desired image(s). The maximum length is 32000
          characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
          for `dall-e-3`.

      stream: Generate the image in streaming mode. Defaults to `false`. See the
          [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
          for more information. This parameter is only supported for `gpt-image-1`.

      background: Allows to set transparency for the background of the generated image(s). This
          parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
          `opaque` or `auto` (default value). When `auto` is used, the model will
          automatically determine the best background for the image.

          If `transparent`, the output format needs to support transparency, so it should
          be set to either `png` (default value) or `webp`.

      model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
          `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
          `gpt-image-1` is used.

      moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
          be either `low` for less restrictive filtering or `auto` (default value).

      n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
          `n=1` is supported.

      output_compression: The compression level (0-100%) for the generated images. This parameter is only
          supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
          defaults to 100.

      output_format: The format in which the generated images are returned. This parameter is only
          supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.

      partial_images: The number of partial images to generate. This parameter is used for streaming
          responses that return partial images. Value must be between 0 and 3. When set to
          0, the response will be a single image sent in one streaming event.

          Note that the final image may be sent before the full number of partial images
          are generated if the full image is generated more quickly.

      quality: The quality of the image that will be generated.

          - `auto` (default value) will automatically select the best quality for the
            given model.
          - `high`, `medium` and `low` are supported for `gpt-image-1`.
          - `hd` and `standard` are supported for `dall-e-3`.
          - `standard` is the only option for `dall-e-2`.

      response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
          returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
          after the image has been generated. This parameter isn't supported for
          `gpt-image-1` which will always return base64-encoded images.

      size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
          (landscape), `1024x1536` (portrait), or `auto` (default value) for
          `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
          one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.

      style: The style of the generated images. This parameter is only supported for
          `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
          towards generating hyper-real and dramatic images. Natural causes the model to
          produce more natural, less hyper-real looking images.

      user: A unique identifier representing your end-user, which can help OpenAI to monitor
          and detect abuse.
          [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    ...
  1416. @required_args(["prompt"], ["prompt", "stream"])
  1417. async def generate(
  1418. self,
  1419. *,
  1420. prompt: str,
  1421. background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
  1422. model: Union[str, ImageModel, None] | Omit = omit,
  1423. moderation: Optional[Literal["low", "auto"]] | Omit = omit,
  1424. n: Optional[int] | Omit = omit,
  1425. output_compression: Optional[int] | Omit = omit,
  1426. output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
  1427. partial_images: Optional[int] | Omit = omit,
  1428. quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
  1429. response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
  1430. size: Optional[
  1431. Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
  1432. ]
  1433. | Omit = omit,
  1434. stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
  1435. style: Optional[Literal["vivid", "natural"]] | Omit = omit,
  1436. user: str | Omit = omit,
  1437. # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
  1438. # The extra values given here take precedence over values defined on the client or passed to this method.
  1439. extra_headers: Headers | None = None,
  1440. extra_query: Query | None = None,
  1441. extra_body: Body | None = None,
  1442. timeout: float | httpx.Timeout | None | NotGiven = not_given,
  1443. ) -> ImagesResponse | AsyncStream[ImageGenStreamEvent]:
  1444. return await self._post(
  1445. "/images/generations",
  1446. body=await async_maybe_transform(
  1447. {
  1448. "prompt": prompt,
  1449. "background": background,
  1450. "model": model,
  1451. "moderation": moderation,
  1452. "n": n,
  1453. "output_compression": output_compression,
  1454. "output_format": output_format,
  1455. "partial_images": partial_images,
  1456. "quality": quality,
  1457. "response_format": response_format,
  1458. "size": size,
  1459. "stream": stream,
  1460. "style": style,
  1461. "user": user,
  1462. },
  1463. image_generate_params.ImageGenerateParamsStreaming
  1464. if stream
  1465. else image_generate_params.ImageGenerateParamsNonStreaming,
  1466. ),
  1467. options=make_request_options(
  1468. extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
  1469. ),
  1470. cast_to=ImagesResponse,
  1471. stream=stream or False,
  1472. stream_cls=AsyncStream[ImageGenStreamEvent],
  1473. )
  1474. class ImagesWithRawResponse:
  1475. def __init__(self, images: Images) -> None:
  1476. self._images = images
  1477. self.create_variation = _legacy_response.to_raw_response_wrapper(
  1478. images.create_variation,
  1479. )
  1480. self.edit = _legacy_response.to_raw_response_wrapper(
  1481. images.edit,
  1482. )
  1483. self.generate = _legacy_response.to_raw_response_wrapper(
  1484. images.generate,
  1485. )
  1486. class AsyncImagesWithRawResponse:
  1487. def __init__(self, images: AsyncImages) -> None:
  1488. self._images = images
  1489. self.create_variation = _legacy_response.async_to_raw_response_wrapper(
  1490. images.create_variation,
  1491. )
  1492. self.edit = _legacy_response.async_to_raw_response_wrapper(
  1493. images.edit,
  1494. )
  1495. self.generate = _legacy_response.async_to_raw_response_wrapper(
  1496. images.generate,
  1497. )
  1498. class ImagesWithStreamingResponse:
  1499. def __init__(self, images: Images) -> None:
  1500. self._images = images
  1501. self.create_variation = to_streamed_response_wrapper(
  1502. images.create_variation,
  1503. )
  1504. self.edit = to_streamed_response_wrapper(
  1505. images.edit,
  1506. )
  1507. self.generate = to_streamed_response_wrapper(
  1508. images.generate,
  1509. )
  1510. class AsyncImagesWithStreamingResponse:
  1511. def __init__(self, images: AsyncImages) -> None:
  1512. self._images = images
  1513. self.create_variation = async_to_streamed_response_wrapper(
  1514. images.create_variation,
  1515. )
  1516. self.edit = async_to_streamed_response_wrapper(
  1517. images.edit,
  1518. )
  1519. self.generate = async_to_streamed_response_wrapper(
  1520. images.generate,
  1521. )