The body of an `InvokeModel` API request for token counting. This
structure mirrors the input format for the `InvokeModel` operation,
allowing you to count tokens for raw text inference requests.
Source code in src/aws_sdk_bedrock_runtime/models.py
14103
14104
14105
14106
14107
14108
14109
14110
14111
14112
14113
14114
14115
14116
14117
14118
14119
14120
14121
14122
14123
14124
14125
14126
14127
14128
14129
14130
14131
14132
14133
14134
14135
14136
14137
14138
14139
14140
14141
14142
14143
14144
14145
@dataclass(kw_only=True)
class InvokeModelTokensRequest:
    """Token-counting request body mirroring a raw `InvokeModel` call.

    This structure carries the same payload format as the `InvokeModel`
    operation so that tokens can be counted for raw text inference
    requests before they are actually invoked.
    """

    body: bytes = field(repr=False)
    """The request body whose tokens should be counted, formatted
    according to the model's expected input format. See [Model inference
    parameters and
    responses](https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html)
    for the per-model input formats.
    """

    def serialize(self, serializer: ShapeSerializer):
        # Delegate to the struct writer, which calls back into
        # serialize_members for the individual fields.
        serializer.write_struct(_SCHEMA_INVOKE_MODEL_TOKENS_REQUEST, self)

    def serialize_members(self, serializer: ShapeSerializer):
        # Write the single `body` member as an opaque blob.
        member_schema = _SCHEMA_INVOKE_MODEL_TOKENS_REQUEST.members["body"]
        serializer.write_blob(member_schema, self.body)

    @classmethod
    def deserialize(cls, deserializer: ShapeDeserializer) -> Self:
        """Construct an instance from the wire representation."""
        return cls(**cls.deserialize_kwargs(deserializer))

    @classmethod
    def deserialize_kwargs(cls, deserializer: ShapeDeserializer) -> dict[str, Any]:
        """Read the struct members into a kwargs dict for the constructor."""
        kwargs: dict[str, Any] = {}

        def _read_member(member_schema: Schema, de: ShapeDeserializer) -> None:
            index = member_schema.expect_member_index()
            if index == 0:
                # Member 0 is `body`; anything else is an unknown member
                # from a newer schema and is skipped with a debug log.
                kwargs["body"] = de.read_blob(
                    _SCHEMA_INVOKE_MODEL_TOKENS_REQUEST.members["body"]
                )
            else:
                logger.debug("Unexpected member schema: %s", member_schema)

        deserializer.read_struct(
            _SCHEMA_INVOKE_MODEL_TOKENS_REQUEST, consumer=_read_member
        )
        return kwargs
Attributes
body
class-attribute
instance-attribute
body: bytes = field(repr=False)
The request body to count tokens for, formatted according to the
model's expected input format. To learn about the input format for
different models, see [Model inference parameters and
responses](https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html).