feat: add flexible enum to model param #75

Merged · 1 commit · Jul 18, 2023
14 changes: 7 additions & 7 deletions src/anthropic/resources/completions.py
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import List, overload
+from typing import List, Union, overload
 from typing_extensions import Literal
 
 from ..types import Completion, completion_create_params
@@ -21,7 +21,7 @@ def create(
         self,
         *,
         max_tokens_to_sample: int,
-        model: str,
+        model: Union[str, Literal["claude-2", "claude-instant-1"]],
         prompt: str,
         metadata: completion_create_params.CompletionRequestNonStreamingMetadata | NotGiven = NOT_GIVEN,
         stop_sequences: List[str] | NotGiven = NOT_GIVEN,
@@ -113,7 +113,7 @@ def create(
         self,
         *,
         max_tokens_to_sample: int,
-        model: str,
+        model: Union[str, Literal["claude-2", "claude-instant-1"]],
         prompt: str,
         stream: Literal[True],
         metadata: completion_create_params.CompletionRequestStreamingMetadata | NotGiven = NOT_GIVEN,
@@ -205,7 +205,7 @@ def create(
         self,
         *,
         max_tokens_to_sample: int,
-        model: str,
+        model: Union[str, Literal["claude-2", "claude-instant-1"]],
         prompt: str,
         metadata: completion_create_params.CompletionRequestNonStreamingMetadata
         | completion_create_params.CompletionRequestStreamingMetadata
@@ -253,7 +253,7 @@ async def create(
         self,
         *,
         max_tokens_to_sample: int,
-        model: str,
+        model: Union[str, Literal["claude-2", "claude-instant-1"]],
         prompt: str,
         metadata: completion_create_params.CompletionRequestNonStreamingMetadata | NotGiven = NOT_GIVEN,
         stop_sequences: List[str] | NotGiven = NOT_GIVEN,
@@ -345,7 +345,7 @@ async def create(
         self,
         *,
         max_tokens_to_sample: int,
-        model: str,
+        model: Union[str, Literal["claude-2", "claude-instant-1"]],
         prompt: str,
         stream: Literal[True],
         metadata: completion_create_params.CompletionRequestStreamingMetadata | NotGiven = NOT_GIVEN,
@@ -437,7 +437,7 @@ async def create(
         self,
         *,
         max_tokens_to_sample: int,
-        model: str,
+        model: Union[str, Literal["claude-2", "claude-instant-1"]],
         prompt: str,
         metadata: completion_create_params.CompletionRequestNonStreamingMetadata
         | completion_create_params.CompletionRequestStreamingMetadata
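The pattern applied throughout this file is a "flexible enum": the `model` parameter is typed as `Union[str, Literal[...]]`, so editors and type checkers can autocomplete the known model names while any other string still type-checks. A minimal sketch of the idea; the `ModelParam` alias and the stub `create()` below are illustrative only, not the SDK's actual names (the SDK inlines the union directly in each overload):

```python
from typing import Union

from typing_extensions import Literal

# Hypothetical alias for illustration; the SDK writes this union inline.
ModelParam = Union[str, Literal["claude-2", "claude-instant-1"]]


def create(*, model: ModelParam, prompt: str, max_tokens_to_sample: int) -> None:
    # Literal members give autocomplete and typo-checking for known models,
    # while the Union with plain str keeps future model names valid.
    print(model, prompt, max_tokens_to_sample)


create(model="claude-2", prompt="Hello", max_tokens_to_sample=16)  # autocompleted literal
create(model="some-future-model", prompt="Hello", max_tokens_to_sample=16)  # arbitrary string still allowed
```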
4 changes: 2 additions & 2 deletions src/anthropic/types/completion_create_params.py
@@ -22,7 +22,7 @@ class CompletionRequestNonStreaming(TypedDict, total=False):
     only specifies the absolute maximum number of tokens to generate.
     """
 
-    model: Required[str]
+    model: Required[Union[str, Literal["claude-2", "claude-instant-1"]]]
     """The model that will complete your prompt.
 
     As we improve Claude, we develop new versions of it that you can query. This
@@ -109,7 +109,7 @@ class CompletionRequestStreaming(TypedDict, total=False):
     only specifies the absolute maximum number of tokens to generate.
     """
 
-    model: Required[str]
+    model: Required[Union[str, Literal["claude-2", "claude-instant-1"]]]
     """The model that will complete your prompt.
 
     As we improve Claude, we develop new versions of it that you can query. This
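From the caller's side the change is invisible at runtime; it only improves static typing of the request params. A hedged usage sketch, assuming the `anthropic` package at this point in its history (sync client with `completions.create`) and an `ANTHROPIC_API_KEY` set in the environment:

```python
from anthropic import AI_PROMPT, HUMAN_PROMPT, Anthropic

client = Anthropic()  # reads ANTHROPIC_API_KEY from the environment

# "claude-2" is one of the Literal members, so editors can autocomplete it;
# a plain string (e.g. a model released after this SDK version) would also
# satisfy Union[str, Literal[...]].
completion = client.completions.create(
    model="claude-2",
    prompt=f"{HUMAN_PROMPT} Say hello{AI_PROMPT}",
    max_tokens_to_sample=32,
)
print(completion.completion)
```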