# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import List, Union, Optional
from typing_extensions import Literal

import httpx

from ... import _legacy_response
from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
    StreamedBinaryAPIResponse,
    AsyncStreamedBinaryAPIResponse,
    to_streamed_response_wrapper,
    async_to_streamed_response_wrapper,
    to_custom_streamed_response_wrapper,
    async_to_custom_streamed_response_wrapper,
)
from ..._base_client import make_request_options
from ...types.realtime import (
    call_refer_params,
    call_accept_params,
    call_create_params,
    call_reject_params,
)
from ...types.responses.response_prompt_param import ResponsePromptParam
from ...types.realtime.realtime_truncation_param import RealtimeTruncationParam
from ...types.realtime.realtime_audio_config_param import RealtimeAudioConfigParam
from ...types.realtime.realtime_tools_config_param import RealtimeToolsConfigParam
from ...types.realtime.realtime_tracing_config_param import RealtimeTracingConfigParam
from ...types.realtime.realtime_tool_choice_config_param import RealtimeToolChoiceConfigParam
from ...types.realtime.realtime_session_create_request_param import RealtimeSessionCreateRequestParam

__all__ = ["Calls", "AsyncCalls"]


class Calls(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> CallsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return CallsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> CallsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return CallsWithStreamingResponse(self)
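
    # Illustrative usage of the two wrappers above (a sketch, not generated code). It
    # assumes this resource is reachable as `client.realtime.calls` on a configured
    # `OpenAI` client and that `call_id` and `offer_sdp` already exist:
    #
    #     raw = client.realtime.calls.with_raw_response.hangup(call_id)
    #     print(raw.headers.get("x-request-id"))
    #
    #     with client.realtime.calls.with_streaming_response.create(sdp=offer_sdp) as response:
    #         answer_sdp = b"".join(response.iter_bytes())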

    def create(
        self,
        *,
        sdp: str,
        session: RealtimeSessionCreateRequestParam | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """
        Create a new Realtime API call over WebRTC and receive the SDP answer needed to
        complete the peer connection.

        Args:
          sdp: WebRTC Session Description Protocol (SDP) offer generated by the caller.

          session: Realtime session object configuration.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"Accept": "application/sdp", **(extra_headers or {})}
        return self._post(
            "/realtime/calls",
            body=maybe_transform(
                {
                    "sdp": sdp,
                    "session": session,
                },
                call_create_params.CallCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )
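
    # Illustrative usage (a sketch, not generated code): assumes a configured `OpenAI`
    # client exposing this resource as `client.realtime.calls`, and an `offer_sdp` string
    # produced by the caller's WebRTC peer connection. The binary response wraps the raw
    # SDP answer, read here via `.text`.
    #
    #     from openai import OpenAI
    #
    #     client = OpenAI()
    #     answer = client.realtime.calls.create(sdp=offer_sdp)
    #     answer_sdp = answer.text  # set this as the remote description on the peer connection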

    def accept(
        self,
        call_id: str,
        *,
        type: Literal["realtime"],
        audio: RealtimeAudioConfigParam | Omit = omit,
        include: List[Literal["item.input_audio_transcription.logprobs"]] | Omit = omit,
        instructions: str | Omit = omit,
        max_output_tokens: Union[int, Literal["inf"]] | Omit = omit,
        model: Union[
            str,
            Literal[
                "gpt-realtime",
                "gpt-realtime-2025-08-28",
                "gpt-4o-realtime-preview",
                "gpt-4o-realtime-preview-2024-10-01",
                "gpt-4o-realtime-preview-2024-12-17",
                "gpt-4o-realtime-preview-2025-06-03",
                "gpt-4o-mini-realtime-preview",
                "gpt-4o-mini-realtime-preview-2024-12-17",
                "gpt-realtime-mini",
                "gpt-realtime-mini-2025-10-06",
                "gpt-audio-mini",
                "gpt-audio-mini-2025-10-06",
            ],
        ]
        | Omit = omit,
        output_modalities: List[Literal["text", "audio"]] | Omit = omit,
        prompt: Optional[ResponsePromptParam] | Omit = omit,
        tool_choice: RealtimeToolChoiceConfigParam | Omit = omit,
        tools: RealtimeToolsConfigParam | Omit = omit,
        tracing: Optional[RealtimeTracingConfigParam] | Omit = omit,
        truncation: RealtimeTruncationParam | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Accept an incoming SIP call and configure the realtime session that will handle
        it.

        Args:
          type: The type of session to create. Always `realtime` for the Realtime API.

          audio: Configuration for input and output audio.

          include: Additional fields to include in server outputs.

              `item.input_audio_transcription.logprobs`: Include logprobs for input audio
              transcription.

          instructions: The default system instructions (i.e. system message) prepended to model calls.
              This field allows the client to guide the model on desired responses. The model
              can be instructed on response content and format (e.g. "be extremely succinct",
              "act friendly", "here are examples of good responses") and on audio behavior
              (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
              instructions are not guaranteed to be followed by the model, but they provide
              guidance to the model on the desired behavior.

              Note that the server sets default instructions which will be used if this field
              is not set and are visible in the `session.created` event at the start of the
              session.

          max_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
              tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
              `inf` for the maximum available tokens for a given model. Defaults to `inf`.

          model: The Realtime model used for this session.

          output_modalities: The set of modalities the model can respond with. It defaults to `["audio"]`,
              indicating that the model will respond with audio plus a transcript. `["text"]`
              can be used to make the model respond with text only. It is not possible to
              request both `text` and `audio` at the same time.

          prompt: Reference to a prompt template and its variables.
              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).

          tool_choice: How the model chooses tools. Provide one of the string modes or force a specific
              function/MCP tool.

          tools: Tools available to the model.

          tracing: Realtime API can write session traces to the
              [Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once
              tracing is enabled for a session, the configuration cannot be modified.

              `auto` will create a trace for the session with default values for the workflow
              name, group id, and metadata.

          truncation: When the number of tokens in a conversation exceeds the model's input token
              limit, the conversation will be truncated, meaning messages (starting from the
              oldest) will not be included in the model's context. A 32k context model with
              4,096 max output tokens can only include 28,224 tokens in the context before
              truncation occurs.

              Clients can configure truncation behavior to truncate with a lower max token
              limit, which is an effective way to control token usage and cost.

              Truncation will reduce the number of cached tokens on the next turn (busting the
              cache), since messages are dropped from the beginning of the context. However,
              clients can also configure truncation to retain messages up to a fraction of the
              maximum context size, which will reduce the need for future truncations and thus
              improve the cache rate.

              Truncation can be disabled entirely, which means the server will never truncate
              but would instead return an error if the conversation exceeds the model's input
              token limit.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return self._post(
            f"/realtime/calls/{call_id}/accept",
            body=maybe_transform(
                {
                    "type": type,
                    "audio": audio,
                    "include": include,
                    "instructions": instructions,
                    "max_output_tokens": max_output_tokens,
                    "model": model,
                    "output_modalities": output_modalities,
                    "prompt": prompt,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "tracing": tracing,
                    "truncation": truncation,
                },
                call_accept_params.CallAcceptParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
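
    # Illustrative usage (a sketch, not generated code): the call ID would typically come
    # from an incoming-call webhook (e.g. `realtime.call.incoming`); the identifier and
    # model below are placeholders.
    #
    #     client.realtime.calls.accept(
    #         "rtc_123",
    #         type="realtime",
    #         model="gpt-realtime",
    #         instructions="You are a friendly phone agent. Keep answers brief.",
    #     )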

    def hangup(
        self,
        call_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        End an active Realtime API call, whether it was initiated over SIP or WebRTC.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return self._post(
            f"/realtime/calls/{call_id}/hangup",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
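
    # Illustrative usage (a sketch): ends the call identified by the placeholder ID below,
    # whether it was started over WebRTC via `create` or accepted from SIP via `accept`.
    #
    #     client.realtime.calls.hangup("rtc_123")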

    def refer(
        self,
        call_id: str,
        *,
        target_uri: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Transfer an active SIP call to a new destination using the SIP REFER verb.

        Args:
          target_uri: URI that should appear in the SIP Refer-To header. Supports values like
              `tel:+14155550123` or `sip:agent@example.com`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return self._post(
            f"/realtime/calls/{call_id}/refer",
            body=maybe_transform({"target_uri": target_uri}, call_refer_params.CallReferParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
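
    # Illustrative usage (a sketch): transfers the active SIP call identified by the
    # placeholder ID to another destination, using one of the URI formats documented above.
    #
    #     client.realtime.calls.refer("rtc_123", target_uri="tel:+14155550123")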

    def reject(
        self,
        call_id: str,
        *,
        status_code: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Decline an incoming SIP call by returning a SIP status code to the caller.

        Args:
          status_code: SIP response code to send back to the caller. Defaults to `603` (Decline) when
              omitted.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return self._post(
            f"/realtime/calls/{call_id}/reject",
            body=maybe_transform({"status_code": status_code}, call_reject_params.CallRejectParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
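
    # Illustrative usage (a sketch): declines an incoming SIP call with `486` (Busy Here)
    # instead of the default `603` (Decline).
    #
    #     client.realtime.calls.reject("rtc_123", status_code=486)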


class AsyncCalls(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncCallsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncCallsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncCallsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncCallsWithStreamingResponse(self)

    async def create(
        self,
        *,
        sdp: str,
        session: RealtimeSessionCreateRequestParam | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """
        Create a new Realtime API call over WebRTC and receive the SDP answer needed to
        complete the peer connection.

        Args:
          sdp: WebRTC Session Description Protocol (SDP) offer generated by the caller.

          session: Realtime session object configuration.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"Accept": "application/sdp", **(extra_headers or {})}
        return await self._post(
            "/realtime/calls",
            body=await async_maybe_transform(
                {
                    "sdp": sdp,
                    "session": session,
                },
                call_create_params.CallCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )
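
    # Illustrative async usage (a sketch, not generated code): mirrors the synchronous
    # example on `Calls.create`, assuming a configured `AsyncOpenAI` client and an
    # `offer_sdp` string; the other async methods follow the same `await` pattern.
    #
    #     import asyncio
    #     from openai import AsyncOpenAI
    #
    #     async def main() -> None:
    #         client = AsyncOpenAI()
    #         answer = await client.realtime.calls.create(sdp=offer_sdp)
    #         answer_sdp = answer.text
    #
    #     asyncio.run(main())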

    async def accept(
        self,
        call_id: str,
        *,
        type: Literal["realtime"],
        audio: RealtimeAudioConfigParam | Omit = omit,
        include: List[Literal["item.input_audio_transcription.logprobs"]] | Omit = omit,
        instructions: str | Omit = omit,
        max_output_tokens: Union[int, Literal["inf"]] | Omit = omit,
        model: Union[
            str,
            Literal[
                "gpt-realtime",
                "gpt-realtime-2025-08-28",
                "gpt-4o-realtime-preview",
                "gpt-4o-realtime-preview-2024-10-01",
                "gpt-4o-realtime-preview-2024-12-17",
                "gpt-4o-realtime-preview-2025-06-03",
                "gpt-4o-mini-realtime-preview",
                "gpt-4o-mini-realtime-preview-2024-12-17",
                "gpt-realtime-mini",
                "gpt-realtime-mini-2025-10-06",
                "gpt-audio-mini",
                "gpt-audio-mini-2025-10-06",
            ],
        ]
        | Omit = omit,
        output_modalities: List[Literal["text", "audio"]] | Omit = omit,
        prompt: Optional[ResponsePromptParam] | Omit = omit,
        tool_choice: RealtimeToolChoiceConfigParam | Omit = omit,
        tools: RealtimeToolsConfigParam | Omit = omit,
        tracing: Optional[RealtimeTracingConfigParam] | Omit = omit,
        truncation: RealtimeTruncationParam | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Accept an incoming SIP call and configure the realtime session that will handle
        it.

        Args:
          type: The type of session to create. Always `realtime` for the Realtime API.

          audio: Configuration for input and output audio.

          include: Additional fields to include in server outputs.

              `item.input_audio_transcription.logprobs`: Include logprobs for input audio
              transcription.

          instructions: The default system instructions (i.e. system message) prepended to model calls.
              This field allows the client to guide the model on desired responses. The model
              can be instructed on response content and format (e.g. "be extremely succinct",
              "act friendly", "here are examples of good responses") and on audio behavior
              (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
              instructions are not guaranteed to be followed by the model, but they provide
              guidance to the model on the desired behavior.

              Note that the server sets default instructions which will be used if this field
              is not set and are visible in the `session.created` event at the start of the
              session.

          max_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
              tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
              `inf` for the maximum available tokens for a given model. Defaults to `inf`.

          model: The Realtime model used for this session.

          output_modalities: The set of modalities the model can respond with. It defaults to `["audio"]`,
              indicating that the model will respond with audio plus a transcript. `["text"]`
              can be used to make the model respond with text only. It is not possible to
              request both `text` and `audio` at the same time.

          prompt: Reference to a prompt template and its variables.
              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).

          tool_choice: How the model chooses tools. Provide one of the string modes or force a specific
              function/MCP tool.

          tools: Tools available to the model.

          tracing: Realtime API can write session traces to the
              [Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once
              tracing is enabled for a session, the configuration cannot be modified.

              `auto` will create a trace for the session with default values for the workflow
              name, group id, and metadata.

          truncation: When the number of tokens in a conversation exceeds the model's input token
              limit, the conversation will be truncated, meaning messages (starting from the
              oldest) will not be included in the model's context. A 32k context model with
              4,096 max output tokens can only include 28,224 tokens in the context before
              truncation occurs.

              Clients can configure truncation behavior to truncate with a lower max token
              limit, which is an effective way to control token usage and cost.

              Truncation will reduce the number of cached tokens on the next turn (busting the
              cache), since messages are dropped from the beginning of the context. However,
              clients can also configure truncation to retain messages up to a fraction of the
              maximum context size, which will reduce the need for future truncations and thus
              improve the cache rate.

              Truncation can be disabled entirely, which means the server will never truncate
              but would instead return an error if the conversation exceeds the model's input
              token limit.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return await self._post(
            f"/realtime/calls/{call_id}/accept",
            body=await async_maybe_transform(
                {
                    "type": type,
                    "audio": audio,
                    "include": include,
                    "instructions": instructions,
                    "max_output_tokens": max_output_tokens,
                    "model": model,
                    "output_modalities": output_modalities,
                    "prompt": prompt,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "tracing": tracing,
                    "truncation": truncation,
                },
                call_accept_params.CallAcceptParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )

    async def hangup(
        self,
        call_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        End an active Realtime API call, whether it was initiated over SIP or WebRTC.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return await self._post(
            f"/realtime/calls/{call_id}/hangup",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )

    async def refer(
        self,
        call_id: str,
        *,
        target_uri: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Transfer an active SIP call to a new destination using the SIP REFER verb.

        Args:
          target_uri: URI that should appear in the SIP Refer-To header. Supports values like
              `tel:+14155550123` or `sip:agent@example.com`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return await self._post(
            f"/realtime/calls/{call_id}/refer",
            body=await async_maybe_transform({"target_uri": target_uri}, call_refer_params.CallReferParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )

    async def reject(
        self,
        call_id: str,
        *,
        status_code: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Decline an incoming SIP call by returning a SIP status code to the caller.

        Args:
          status_code: SIP response code to send back to the caller. Defaults to `603` (Decline) when
              omitted.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return await self._post(
            f"/realtime/calls/{call_id}/reject",
            body=await async_maybe_transform({"status_code": status_code}, call_reject_params.CallRejectParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )


class CallsWithRawResponse:
    def __init__(self, calls: Calls) -> None:
        self._calls = calls

        self.create = _legacy_response.to_raw_response_wrapper(
            calls.create,
        )
        self.accept = _legacy_response.to_raw_response_wrapper(
            calls.accept,
        )
        self.hangup = _legacy_response.to_raw_response_wrapper(
            calls.hangup,
        )
        self.refer = _legacy_response.to_raw_response_wrapper(
            calls.refer,
        )
        self.reject = _legacy_response.to_raw_response_wrapper(
            calls.reject,
        )


class AsyncCallsWithRawResponse:
    def __init__(self, calls: AsyncCalls) -> None:
        self._calls = calls

        self.create = _legacy_response.async_to_raw_response_wrapper(
            calls.create,
        )
        self.accept = _legacy_response.async_to_raw_response_wrapper(
            calls.accept,
        )
        self.hangup = _legacy_response.async_to_raw_response_wrapper(
            calls.hangup,
        )
        self.refer = _legacy_response.async_to_raw_response_wrapper(
            calls.refer,
        )
        self.reject = _legacy_response.async_to_raw_response_wrapper(
            calls.reject,
        )


class CallsWithStreamingResponse:
    def __init__(self, calls: Calls) -> None:
        self._calls = calls

        self.create = to_custom_streamed_response_wrapper(
            calls.create,
            StreamedBinaryAPIResponse,
        )
        self.accept = to_streamed_response_wrapper(
            calls.accept,
        )
        self.hangup = to_streamed_response_wrapper(
            calls.hangup,
        )
        self.refer = to_streamed_response_wrapper(
            calls.refer,
        )
        self.reject = to_streamed_response_wrapper(
            calls.reject,
        )


class AsyncCallsWithStreamingResponse:
    def __init__(self, calls: AsyncCalls) -> None:
        self._calls = calls

        self.create = async_to_custom_streamed_response_wrapper(
            calls.create,
            AsyncStreamedBinaryAPIResponse,
        )
        self.accept = async_to_streamed_response_wrapper(
            calls.accept,
        )
        self.hangup = async_to_streamed_response_wrapper(
            calls.hangup,
        )
        self.refer = async_to_streamed_response_wrapper(
            calls.refer,
        )
        self.reject = async_to_streamed_response_wrapper(
            calls.reject,
        )