# NOTE: an extraction artifact (a run of concatenated line numbers 1-1858) was removed here.
- # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
- from __future__ import annotations
- from typing import Union, Mapping, Optional, cast
- from typing_extensions import Literal, overload
- import httpx
- from .. import _legacy_response
- from ..types import image_edit_params, image_generate_params, image_create_variation_params
- from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, SequenceNotStr, omit, not_given
- from .._utils import extract_files, required_args, maybe_transform, deepcopy_minimal, async_maybe_transform
- from .._compat import cached_property
- from .._resource import SyncAPIResource, AsyncAPIResource
- from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
- from .._streaming import Stream, AsyncStream
- from .._base_client import make_request_options
- from ..types.image_model import ImageModel
- from ..types.images_response import ImagesResponse
- from ..types.image_gen_stream_event import ImageGenStreamEvent
- from ..types.image_edit_stream_event import ImageEditStreamEvent
- __all__ = ["Images", "AsyncImages"]
- class Images(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> ImagesWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ImagesWithRawResponse(self)
    @cached_property
    def with_streaming_response(self) -> ImagesWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ImagesWithStreamingResponse(self)
- def create_variation(
- self,
- *,
- image: FileTypes,
- model: Union[str, ImageModel, None] | Omit = omit,
- n: Optional[int] | Omit = omit,
- response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
- size: Optional[Literal["256x256", "512x512", "1024x1024"]] | Omit = omit,
- user: str | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ImagesResponse:
- """Creates a variation of a given image.
- This endpoint only supports `dall-e-2`.
- Args:
- image: The image to use as the basis for the variation(s). Must be a valid PNG file,
- less than 4MB, and square.
- model: The model to use for image generation. Only `dall-e-2` is supported at this
- time.
- n: The number of images to generate. Must be between 1 and 10.
- response_format: The format in which the generated images are returned. Must be one of `url` or
- `b64_json`. URLs are only valid for 60 minutes after the image has been
- generated.
- size: The size of the generated images. Must be one of `256x256`, `512x512`, or
- `1024x1024`.
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
- extra_headers: Send extra headers
- extra_query: Add additional query parameters to the request
- extra_body: Add additional JSON properties to the request
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "image": image,
- "model": model,
- "n": n,
- "response_format": response_format,
- "size": size,
- "user": user,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return self._post(
- "/images/variations",
- body=maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- )
    @overload
    def edit(
        self,
        *,
        image: Union[FileTypes, SequenceNotStr[FileTypes]],
        prompt: str,
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
        mask: FileTypes | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
        stream: Optional[Literal[False]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ImagesResponse:
        """Creates an edited or extended image given one or more source images and a
        prompt.

        This endpoint only supports `gpt-image-1` and `dall-e-2`.

        Args:
          image: The image(s) to edit. Must be a supported image file or an array of images.

              For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
              50MB. You can provide up to 16 images.

              For `dall-e-2`, you can only provide one image, and it should be a square `png`
              file less than 4MB.

          prompt: A text description of the desired image(s). The maximum length is 1000
              characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.

          background: Allows to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          input_fidelity: Control how much effort the model will exert to match the style and features,
              especially facial features, of input images. This parameter is only supported
              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
              `low`. Defaults to `low`.

          mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
              indicate where `image` should be edited. If there are multiple images provided,
              the mask will be applied on the first image. Must be a valid PNG file, less than
              4MB, and have the same dimensions as `image`.

          model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
              supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
              is used.

          n: The number of images to generate. Must be between 1 and 10.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
              default value is `png`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              are generated if the full image is generated more quickly.

          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
              Defaults to `auto`.

          response_format: The format in which the generated images are returned. Must be one of `url` or
              `b64_json`. URLs are only valid for 60 minutes after the image has been
              generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
              will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.

          stream: Edit the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
    @overload
    def edit(
        self,
        *,
        image: Union[FileTypes, SequenceNotStr[FileTypes]],
        prompt: str,
        stream: Literal[True],
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
        mask: FileTypes | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Stream[ImageEditStreamEvent]:
        """Creates an edited or extended image given one or more source images and a
        prompt.

        This endpoint only supports `gpt-image-1` and `dall-e-2`.

        Args:
          image: The image(s) to edit. Must be a supported image file or an array of images.

              For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
              50MB. You can provide up to 16 images.

              For `dall-e-2`, you can only provide one image, and it should be a square `png`
              file less than 4MB.

          prompt: A text description of the desired image(s). The maximum length is 1000
              characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.

          stream: Edit the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information.

          background: Allows to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          input_fidelity: Control how much effort the model will exert to match the style and features,
              especially facial features, of input images. This parameter is only supported
              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
              `low`. Defaults to `low`.

          mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
              indicate where `image` should be edited. If there are multiple images provided,
              the mask will be applied on the first image. Must be a valid PNG file, less than
              4MB, and have the same dimensions as `image`.

          model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
              supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
              is used.

          n: The number of images to generate. Must be between 1 and 10.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
              default value is `png`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              are generated if the full image is generated more quickly.

          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
              Defaults to `auto`.

          response_format: The format in which the generated images are returned. Must be one of `url` or
              `b64_json`. URLs are only valid for 60 minutes after the image has been
              generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
              will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
    @overload
    def edit(
        self,
        *,
        image: Union[FileTypes, SequenceNotStr[FileTypes]],
        prompt: str,
        stream: bool,
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
        mask: FileTypes | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ImagesResponse | Stream[ImageEditStreamEvent]:
        """Creates an edited or extended image given one or more source images and a
        prompt.

        This endpoint only supports `gpt-image-1` and `dall-e-2`.

        Args:
          image: The image(s) to edit. Must be a supported image file or an array of images.

              For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
              50MB. You can provide up to 16 images.

              For `dall-e-2`, you can only provide one image, and it should be a square `png`
              file less than 4MB.

          prompt: A text description of the desired image(s). The maximum length is 1000
              characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.

          stream: Edit the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information.

          background: Allows to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          input_fidelity: Control how much effort the model will exert to match the style and features,
              especially facial features, of input images. This parameter is only supported
              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
              `low`. Defaults to `low`.

          mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
              indicate where `image` should be edited. If there are multiple images provided,
              the mask will be applied on the first image. Must be a valid PNG file, less than
              4MB, and have the same dimensions as `image`.

          model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
              supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
              is used.

          n: The number of images to generate. Must be between 1 and 10.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
              default value is `png`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              are generated if the full image is generated more quickly.

          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
              Defaults to `auto`.

          response_format: The format in which the generated images are returned. Must be one of `url` or
              `b64_json`. URLs are only valid for 60 minutes after the image has been
              generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
              will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
- @required_args(["image", "prompt"], ["image", "prompt", "stream"])
- def edit(
- self,
- *,
- image: Union[FileTypes, SequenceNotStr[FileTypes]],
- prompt: str,
- background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
- input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
- mask: FileTypes | Omit = omit,
- model: Union[str, ImageModel, None] | Omit = omit,
- n: Optional[int] | Omit = omit,
- output_compression: Optional[int] | Omit = omit,
- output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
- partial_images: Optional[int] | Omit = omit,
- quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
- response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
- size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
- stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
- user: str | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ImagesResponse | Stream[ImageEditStreamEvent]:
- body = deepcopy_minimal(
- {
- "image": image,
- "prompt": prompt,
- "background": background,
- "input_fidelity": input_fidelity,
- "mask": mask,
- "model": model,
- "n": n,
- "output_compression": output_compression,
- "output_format": output_format,
- "partial_images": partial_images,
- "quality": quality,
- "response_format": response_format,
- "size": size,
- "stream": stream,
- "user": user,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", "<array>"], ["mask"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return self._post(
- "/images/edits",
- body=maybe_transform(
- body,
- image_edit_params.ImageEditParamsStreaming if stream else image_edit_params.ImageEditParamsNonStreaming,
- ),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- stream=stream or False,
- stream_cls=Stream[ImageEditStreamEvent],
- )
- @overload
- def generate(
- self,
- *,
- prompt: str,
- background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
- model: Union[str, ImageModel, None] | Omit = omit,
- moderation: Optional[Literal["low", "auto"]] | Omit = omit,
- n: Optional[int] | Omit = omit,
- output_compression: Optional[int] | Omit = omit,
- output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
- partial_images: Optional[int] | Omit = omit,
- quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
- response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
- size: Optional[
- Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
- ]
- | Omit = omit,
- stream: Optional[Literal[False]] | Omit = omit,
- style: Optional[Literal["vivid", "natural"]] | Omit = omit,
- user: str | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ImagesResponse:
- """
- Creates an image given a prompt.
- [Learn more](https://platform.openai.com/docs/guides/images).
- Args:
- prompt: A text description of the desired image(s). The maximum length is 32000
- characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
- for `dall-e-3`.
- background: Allows to set transparency for the background of the generated image(s). This
- parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
- `opaque` or `auto` (default value). When `auto` is used, the model will
- automatically determine the best background for the image.
- If `transparent`, the output format needs to support transparency, so it should
- be set to either `png` (default value) or `webp`.
- model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
- `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
- `gpt-image-1` is used.
- moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
- be either `low` for less restrictive filtering or `auto` (default value).
- n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
- `n=1` is supported.
- output_compression: The compression level (0-100%) for the generated images. This parameter is only
- supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
- defaults to 100.
- output_format: The format in which the generated images are returned. This parameter is only
- supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
- partial_images: The number of partial images to generate. This parameter is used for streaming
- responses that return partial images. Value must be between 0 and 3. When set to
- 0, the response will be a single image sent in one streaming event.
- Note that the final image may be sent before the full number of partial images
- are generated if the full image is generated more quickly.
- quality: The quality of the image that will be generated.
- - `auto` (default value) will automatically select the best quality for the
- given model.
- - `high`, `medium` and `low` are supported for `gpt-image-1`.
- - `hd` and `standard` are supported for `dall-e-3`.
- - `standard` is the only option for `dall-e-2`.
- response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
- returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
- after the image has been generated. This parameter isn't supported for
- `gpt-image-1` which will always return base64-encoded images.
- size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
- (landscape), `1024x1536` (portrait), or `auto` (default value) for
- `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
- one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
- stream: Generate the image in streaming mode. Defaults to `false`. See the
- [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
- for more information. This parameter is only supported for `gpt-image-1`.
- style: The style of the generated images. This parameter is only supported for
- `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
- towards generating hyper-real and dramatic images. Natural causes the model to
- produce more natural, less hyper-real looking images.
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
- extra_headers: Send extra headers
- extra_query: Add additional query parameters to the request
- extra_body: Add additional JSON properties to the request
- timeout: Override the client-level default timeout for this request, in seconds
- """
- ...
    @overload
    # Streaming overload: when `stream=True` is passed, the call returns a
    # `Stream[ImageGenStreamEvent]` rather than a fully materialized response.
    def generate(
        self,
        *,
        prompt: str,
        stream: Literal[True],
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        moderation: Optional[Literal["low", "auto"]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[
            Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
        ]
        | Omit = omit,
        style: Optional[Literal["vivid", "natural"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Stream[ImageGenStreamEvent]:
        """
        Creates an image given a prompt.
        [Learn more](https://platform.openai.com/docs/guides/images).

        Args:
          prompt: A text description of the desired image(s). The maximum length is 32000
              characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
              for `dall-e-3`.

          stream: Generate the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information. This parameter is only supported for `gpt-image-1`.

          background: Allows to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
              `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
              `gpt-image-1` is used.

          moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
              be either `low` for less restrictive filtering or `auto` (default value).

          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
              `n=1` is supported.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              are generated if the full image is generated more quickly.

          quality: The quality of the image that will be generated.

              - `auto` (default value) will automatically select the best quality for the
                given model.
              - `high`, `medium` and `low` are supported for `gpt-image-1`.
              - `hd` and `standard` are supported for `dall-e-3`.
              - `standard` is the only option for `dall-e-2`.

          response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
              returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
              after the image has been generated. This parameter isn't supported for
              `gpt-image-1` which will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
              one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.

          style: The style of the generated images. This parameter is only supported for
              `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
              towards generating hyper-real and dramatic images. Natural causes the model to
              produce more natural, less hyper-real looking images.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
    @overload
    # Union overload: when `stream` is only known at runtime (`bool`), the result
    # may be either a complete `ImagesResponse` or a `Stream[ImageGenStreamEvent]`.
    def generate(
        self,
        *,
        prompt: str,
        stream: bool,
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        moderation: Optional[Literal["low", "auto"]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[
            Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
        ]
        | Omit = omit,
        style: Optional[Literal["vivid", "natural"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ImagesResponse | Stream[ImageGenStreamEvent]:
        """
        Creates an image given a prompt.
        [Learn more](https://platform.openai.com/docs/guides/images).

        Args:
          prompt: A text description of the desired image(s). The maximum length is 32000
              characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
              for `dall-e-3`.

          stream: Generate the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information. This parameter is only supported for `gpt-image-1`.

          background: Allows to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
              `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
              `gpt-image-1` is used.

          moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
              be either `low` for less restrictive filtering or `auto` (default value).

          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
              `n=1` is supported.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              are generated if the full image is generated more quickly.

          quality: The quality of the image that will be generated.

              - `auto` (default value) will automatically select the best quality for the
                given model.
              - `high`, `medium` and `low` are supported for `gpt-image-1`.
              - `hd` and `standard` are supported for `dall-e-3`.
              - `standard` is the only option for `dall-e-2`.

          response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
              returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
              after the image has been generated. This parameter isn't supported for
              `gpt-image-1` which will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
              one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.

          style: The style of the generated images. This parameter is only supported for
              `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
              towards generating hyper-real and dramatic images. Natural causes the model to
              produce more natural, less hyper-real looking images.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
- @required_args(["prompt"], ["prompt", "stream"])
- def generate(
- self,
- *,
- prompt: str,
- background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
- model: Union[str, ImageModel, None] | Omit = omit,
- moderation: Optional[Literal["low", "auto"]] | Omit = omit,
- n: Optional[int] | Omit = omit,
- output_compression: Optional[int] | Omit = omit,
- output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
- partial_images: Optional[int] | Omit = omit,
- quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
- response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
- size: Optional[
- Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
- ]
- | Omit = omit,
- stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
- style: Optional[Literal["vivid", "natural"]] | Omit = omit,
- user: str | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ImagesResponse | Stream[ImageGenStreamEvent]:
- return self._post(
- "/images/generations",
- body=maybe_transform(
- {
- "prompt": prompt,
- "background": background,
- "model": model,
- "moderation": moderation,
- "n": n,
- "output_compression": output_compression,
- "output_format": output_format,
- "partial_images": partial_images,
- "quality": quality,
- "response_format": response_format,
- "size": size,
- "stream": stream,
- "style": style,
- "user": user,
- },
- image_generate_params.ImageGenerateParamsStreaming
- if stream
- else image_generate_params.ImageGenerateParamsNonStreaming,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- stream=stream or False,
- stream_cls=Stream[ImageGenStreamEvent],
- )
- class AsyncImages(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncImagesWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
- For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
- """
- return AsyncImagesWithRawResponse(self)
- @cached_property
- def with_streaming_response(self) -> AsyncImagesWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
- For more information, see https://www.github.com/openai/openai-python#with_streaming_response
- """
- return AsyncImagesWithStreamingResponse(self)
- async def create_variation(
- self,
- *,
- image: FileTypes,
- model: Union[str, ImageModel, None] | Omit = omit,
- n: Optional[int] | Omit = omit,
- response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
- size: Optional[Literal["256x256", "512x512", "1024x1024"]] | Omit = omit,
- user: str | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ImagesResponse:
- """Creates a variation of a given image.
- This endpoint only supports `dall-e-2`.
- Args:
- image: The image to use as the basis for the variation(s). Must be a valid PNG file,
- less than 4MB, and square.
- model: The model to use for image generation. Only `dall-e-2` is supported at this
- time.
- n: The number of images to generate. Must be between 1 and 10.
- response_format: The format in which the generated images are returned. Must be one of `url` or
- `b64_json`. URLs are only valid for 60 minutes after the image has been
- generated.
- size: The size of the generated images. Must be one of `256x256`, `512x512`, or
- `1024x1024`.
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
- extra_headers: Send extra headers
- extra_query: Add additional query parameters to the request
- extra_body: Add additional JSON properties to the request
- timeout: Override the client-level default timeout for this request, in seconds
- """
- body = deepcopy_minimal(
- {
- "image": image,
- "model": model,
- "n": n,
- "response_format": response_format,
- "size": size,
- "user": user,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return await self._post(
- "/images/variations",
- body=await async_maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- )
    @overload
    # Non-streaming overload: with `stream` omitted or `False`, the full
    # `ImagesResponse` is returned in one piece.
    async def edit(
        self,
        *,
        image: Union[FileTypes, SequenceNotStr[FileTypes]],
        prompt: str,
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
        mask: FileTypes | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
        stream: Optional[Literal[False]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ImagesResponse:
        """Creates an edited or extended image given one or more source images and a
        prompt.

        This endpoint only supports `gpt-image-1` and `dall-e-2`.

        Args:
          image: The image(s) to edit. Must be a supported image file or an array of images.

              For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
              50MB. You can provide up to 16 images.

              For `dall-e-2`, you can only provide one image, and it should be a square `png`
              file less than 4MB.

          prompt: A text description of the desired image(s). The maximum length is 1000
              characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.

          background: Allows to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          input_fidelity: Control how much effort the model will exert to match the style and features,
              especially facial features, of input images. This parameter is only supported
              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
              `low`. Defaults to `low`.

          mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
              indicate where `image` should be edited. If there are multiple images provided,
              the mask will be applied on the first image. Must be a valid PNG file, less than
              4MB, and have the same dimensions as `image`.

          model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
              supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
              is used.

          n: The number of images to generate. Must be between 1 and 10.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
              default value is `png`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              are generated if the full image is generated more quickly.

          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
              Defaults to `auto`.

          response_format: The format in which the generated images are returned. Must be one of `url` or
              `b64_json`. URLs are only valid for 60 minutes after the image has been
              generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
              will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.

          stream: Edit the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
    @overload
    # Streaming overload: when `stream=True` is passed, the call returns an
    # `AsyncStream[ImageEditStreamEvent]` rather than a fully materialized response.
    async def edit(
        self,
        *,
        image: Union[FileTypes, SequenceNotStr[FileTypes]],
        prompt: str,
        stream: Literal[True],
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
        mask: FileTypes | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncStream[ImageEditStreamEvent]:
        """Creates an edited or extended image given one or more source images and a
        prompt.

        This endpoint only supports `gpt-image-1` and `dall-e-2`.

        Args:
          image: The image(s) to edit. Must be a supported image file or an array of images.

              For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
              50MB. You can provide up to 16 images.

              For `dall-e-2`, you can only provide one image, and it should be a square `png`
              file less than 4MB.

          prompt: A text description of the desired image(s). The maximum length is 1000
              characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.

          stream: Edit the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information.

          background: Allows to set transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          input_fidelity: Control how much effort the model will exert to match the style and features,
              especially facial features, of input images. This parameter is only supported
              for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
              `low`. Defaults to `low`.

          mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
              indicate where `image` should be edited. If there are multiple images provided,
              the mask will be applied on the first image. Must be a valid PNG file, less than
              4MB, and have the same dimensions as `image`.

          model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
              supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
              is used.

          n: The number of images to generate. Must be between 1 and 10.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
              default value is `png`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              are generated if the full image is generated more quickly.

          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
              Defaults to `auto`.

          response_format: The format in which the generated images are returned. Must be one of `url` or
              `b64_json`. URLs are only valid for 60 minutes after the image has been
              generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
              will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
- @overload
- async def edit(
- self,
- *,
- image: Union[FileTypes, SequenceNotStr[FileTypes]],
- prompt: str,
- stream: bool,
- background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
- input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
- mask: FileTypes | Omit = omit,
- model: Union[str, ImageModel, None] | Omit = omit,
- n: Optional[int] | Omit = omit,
- output_compression: Optional[int] | Omit = omit,
- output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
- partial_images: Optional[int] | Omit = omit,
- quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
- response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
- size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
- user: str | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ImagesResponse | AsyncStream[ImageEditStreamEvent]:
- """Creates an edited or extended image given one or more source images and a
- prompt.
- This endpoint only supports `gpt-image-1` and `dall-e-2`.
- Args:
- image: The image(s) to edit. Must be a supported image file or an array of images.
- For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
- 50MB. You can provide up to 16 images.
- For `dall-e-2`, you can only provide one image, and it should be a square `png`
- file less than 4MB.
- prompt: A text description of the desired image(s). The maximum length is 1000
- characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
- stream: Edit the image in streaming mode. Defaults to `false`. See the
- [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
- for more information.
- background: Allows to set transparency for the background of the generated image(s). This
- parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
- `opaque` or `auto` (default value). When `auto` is used, the model will
- automatically determine the best background for the image.
- If `transparent`, the output format needs to support transparency, so it should
- be set to either `png` (default value) or `webp`.
- input_fidelity: Control how much effort the model will exert to match the style and features,
- especially facial features, of input images. This parameter is only supported
- for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
- `low`. Defaults to `low`.
- mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
- indicate where `image` should be edited. If there are multiple images provided,
- the mask will be applied on the first image. Must be a valid PNG file, less than
- 4MB, and have the same dimensions as `image`.
- model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
- supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
- is used.
- n: The number of images to generate. Must be between 1 and 10.
- output_compression: The compression level (0-100%) for the generated images. This parameter is only
- supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
- defaults to 100.
- output_format: The format in which the generated images are returned. This parameter is only
- supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
- default value is `png`.
- partial_images: The number of partial images to generate. This parameter is used for streaming
- responses that return partial images. Value must be between 0 and 3. When set to
- 0, the response will be a single image sent in one streaming event.
- Note that the final image may be sent before the full number of partial images
- are generated if the full image is generated more quickly.
- quality: The quality of the image that will be generated. `high`, `medium` and `low` are
- only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
- Defaults to `auto`.
- response_format: The format in which the generated images are returned. Must be one of `url` or
- `b64_json`. URLs are only valid for 60 minutes after the image has been
- generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
- will always return base64-encoded images.
- size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
- (landscape), `1024x1536` (portrait), or `auto` (default value) for
- `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
- user: A unique identifier representing your end-user, which can help OpenAI to monitor
- and detect abuse.
- [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
- extra_headers: Send extra headers
- extra_query: Add additional query parameters to the request
- extra_body: Add additional JSON properties to the request
- timeout: Override the client-level default timeout for this request, in seconds
- """
- ...
- @required_args(["image", "prompt"], ["image", "prompt", "stream"])
- async def edit(
- self,
- *,
- image: Union[FileTypes, SequenceNotStr[FileTypes]],
- prompt: str,
- background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
- input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
- mask: FileTypes | Omit = omit,
- model: Union[str, ImageModel, None] | Omit = omit,
- n: Optional[int] | Omit = omit,
- output_compression: Optional[int] | Omit = omit,
- output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
- partial_images: Optional[int] | Omit = omit,
- quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
- response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
- size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
- stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
- user: str | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ImagesResponse | AsyncStream[ImageEditStreamEvent]:
- body = deepcopy_minimal(
- {
- "image": image,
- "prompt": prompt,
- "background": background,
- "input_fidelity": input_fidelity,
- "mask": mask,
- "model": model,
- "n": n,
- "output_compression": output_compression,
- "output_format": output_format,
- "partial_images": partial_images,
- "quality": quality,
- "response_format": response_format,
- "size": size,
- "stream": stream,
- "user": user,
- }
- )
- files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", "<array>"], ["mask"]])
- # It should be noted that the actual Content-Type header that will be
- # sent to the server will contain a `boundary` parameter, e.g.
- # multipart/form-data; boundary=---abc--
- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
- return await self._post(
- "/images/edits",
- body=await async_maybe_transform(
- body,
- image_edit_params.ImageEditParamsStreaming if stream else image_edit_params.ImageEditParamsNonStreaming,
- ),
- files=files,
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- stream=stream or False,
- stream_cls=AsyncStream[ImageEditStreamEvent],
- )
    @overload
    async def generate(
        self,
        *,
        prompt: str,
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        moderation: Optional[Literal["low", "auto"]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[
            Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
        ]
        | Omit = omit,
        stream: Optional[Literal[False]] | Omit = omit,
        style: Optional[Literal["vivid", "natural"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ImagesResponse:
        """
        Creates an image given a prompt.
        [Learn more](https://platform.openai.com/docs/guides/images).

        Args:
          prompt: A text description of the desired image(s). The maximum length is 32000
              characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
              for `dall-e-3`.

          background: Allows setting transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
              `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
              `gpt-image-1` is used.

          moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
              be either `low` for less restrictive filtering or `auto` (default value).

          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
              `n=1` is supported.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              are generated if the full image is generated more quickly.

          quality: The quality of the image that will be generated.

              - `auto` (default value) will automatically select the best quality for the
                given model.
              - `high`, `medium` and `low` are supported for `gpt-image-1`.
              - `hd` and `standard` are supported for `dall-e-3`.
              - `standard` is the only option for `dall-e-2`.

          response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
              returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
              after the image has been generated. This parameter isn't supported for
              `gpt-image-1` which will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
              one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.

          stream: Generate the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information. This parameter is only supported for `gpt-image-1`.

          style: The style of the generated images. This parameter is only supported for
              `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
              towards generating hyper-real and dramatic images. Natural causes the model to
              produce more natural, less hyper-real looking images.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
    @overload
    async def generate(
        self,
        *,
        prompt: str,
        stream: Literal[True],
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        moderation: Optional[Literal["low", "auto"]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[
            Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
        ]
        | Omit = omit,
        style: Optional[Literal["vivid", "natural"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncStream[ImageGenStreamEvent]:
        """
        Creates an image given a prompt.
        [Learn more](https://platform.openai.com/docs/guides/images).

        Args:
          prompt: A text description of the desired image(s). The maximum length is 32000
              characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
              for `dall-e-3`.

          stream: Generate the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information. This parameter is only supported for `gpt-image-1`.

          background: Allows setting transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
              `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
              `gpt-image-1` is used.

          moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
              be either `low` for less restrictive filtering or `auto` (default value).

          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
              `n=1` is supported.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              are generated if the full image is generated more quickly.

          quality: The quality of the image that will be generated.

              - `auto` (default value) will automatically select the best quality for the
                given model.
              - `high`, `medium` and `low` are supported for `gpt-image-1`.
              - `hd` and `standard` are supported for `dall-e-3`.
              - `standard` is the only option for `dall-e-2`.

          response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
              returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
              after the image has been generated. This parameter isn't supported for
              `gpt-image-1` which will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
              one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.

          style: The style of the generated images. This parameter is only supported for
              `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
              towards generating hyper-real and dramatic images. Natural causes the model to
              produce more natural, less hyper-real looking images.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
    @overload
    async def generate(
        self,
        *,
        prompt: str,
        stream: bool,
        background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
        model: Union[str, ImageModel, None] | Omit = omit,
        moderation: Optional[Literal["low", "auto"]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        output_compression: Optional[int] | Omit = omit,
        output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
        partial_images: Optional[int] | Omit = omit,
        quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
        response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
        size: Optional[
            Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
        ]
        | Omit = omit,
        style: Optional[Literal["vivid", "natural"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ImagesResponse | AsyncStream[ImageGenStreamEvent]:
        """
        Creates an image given a prompt.
        [Learn more](https://platform.openai.com/docs/guides/images).

        Args:
          prompt: A text description of the desired image(s). The maximum length is 32000
              characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
              for `dall-e-3`.

          stream: Generate the image in streaming mode. Defaults to `false`. See the
              [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
              for more information. This parameter is only supported for `gpt-image-1`.

          background: Allows setting transparency for the background of the generated image(s). This
              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
              `opaque` or `auto` (default value). When `auto` is used, the model will
              automatically determine the best background for the image.

              If `transparent`, the output format needs to support transparency, so it should
              be set to either `png` (default value) or `webp`.

          model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
              `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
              `gpt-image-1` is used.

          moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
              be either `low` for less restrictive filtering or `auto` (default value).

          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
              `n=1` is supported.

          output_compression: The compression level (0-100%) for the generated images. This parameter is only
              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
              defaults to 100.

          output_format: The format in which the generated images are returned. This parameter is only
              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.

          partial_images: The number of partial images to generate. This parameter is used for streaming
              responses that return partial images. Value must be between 0 and 3. When set to
              0, the response will be a single image sent in one streaming event.

              Note that the final image may be sent before the full number of partial images
              are generated if the full image is generated more quickly.

          quality: The quality of the image that will be generated.

              - `auto` (default value) will automatically select the best quality for the
                given model.
              - `high`, `medium` and `low` are supported for `gpt-image-1`.
              - `hd` and `standard` are supported for `dall-e-3`.
              - `standard` is the only option for `dall-e-2`.

          response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
              returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
              after the image has been generated. This parameter isn't supported for
              `gpt-image-1` which will always return base64-encoded images.

          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
              (landscape), `1024x1536` (portrait), or `auto` (default value) for
              `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
              one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.

          style: The style of the generated images. This parameter is only supported for
              `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
              towards generating hyper-real and dramatic images. Natural causes the model to
              produce more natural, less hyper-real looking images.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
- @required_args(["prompt"], ["prompt", "stream"])
- async def generate(
- self,
- *,
- prompt: str,
- background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
- model: Union[str, ImageModel, None] | Omit = omit,
- moderation: Optional[Literal["low", "auto"]] | Omit = omit,
- n: Optional[int] | Omit = omit,
- output_compression: Optional[int] | Omit = omit,
- output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
- partial_images: Optional[int] | Omit = omit,
- quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
- response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
- size: Optional[
- Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
- ]
- | Omit = omit,
- stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
- style: Optional[Literal["vivid", "natural"]] | Omit = omit,
- user: str | Omit = omit,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> ImagesResponse | AsyncStream[ImageGenStreamEvent]:
- return await self._post(
- "/images/generations",
- body=await async_maybe_transform(
- {
- "prompt": prompt,
- "background": background,
- "model": model,
- "moderation": moderation,
- "n": n,
- "output_compression": output_compression,
- "output_format": output_format,
- "partial_images": partial_images,
- "quality": quality,
- "response_format": response_format,
- "size": size,
- "stream": stream,
- "style": style,
- "user": user,
- },
- image_generate_params.ImageGenerateParamsStreaming
- if stream
- else image_generate_params.ImageGenerateParamsNonStreaming,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ImagesResponse,
- stream=stream or False,
- stream_cls=AsyncStream[ImageGenStreamEvent],
- )
class ImagesWithRawResponse:
    """View over a sync `Images` resource whose methods are wrapped with
    `_legacy_response.to_raw_response_wrapper`."""

    def __init__(self, images: Images) -> None:
        self._images = images

        wrap = _legacy_response.to_raw_response_wrapper
        self.create_variation = wrap(images.create_variation)
        self.edit = wrap(images.edit)
        self.generate = wrap(images.generate)
class AsyncImagesWithRawResponse:
    """View over an `AsyncImages` resource whose methods are wrapped with
    `_legacy_response.async_to_raw_response_wrapper`."""

    def __init__(self, images: AsyncImages) -> None:
        self._images = images

        wrap = _legacy_response.async_to_raw_response_wrapper
        self.create_variation = wrap(images.create_variation)
        self.edit = wrap(images.edit)
        self.generate = wrap(images.generate)
class ImagesWithStreamingResponse:
    """View over a sync `Images` resource whose methods are wrapped with
    `to_streamed_response_wrapper`."""

    def __init__(self, images: Images) -> None:
        self._images = images

        wrap = to_streamed_response_wrapper
        self.create_variation = wrap(images.create_variation)
        self.edit = wrap(images.edit)
        self.generate = wrap(images.generate)
class AsyncImagesWithStreamingResponse:
    """View over an `AsyncImages` resource whose methods are wrapped with
    `async_to_streamed_response_wrapper`."""

    def __init__(self, images: AsyncImages) -> None:
        self._images = images

        wrap = async_to_streamed_response_wrapper
        self.create_variation = wrap(images.create_variation)
        self.edit = wrap(images.edit)
        self.generate = wrap(images.generate)
|