diff --git a/google/cloud/aiplatform_v1/__init__.py b/google/cloud/aiplatform_v1/__init__.py index 2c8faf1a87..9c54ee4c48 100644 --- a/google/cloud/aiplatform_v1/__init__.py +++ b/google/cloud/aiplatform_v1/__init__.py @@ -617,6 +617,7 @@ from .types.machine_resources import BatchDedicatedResources from .types.machine_resources import DedicatedResources from .types.machine_resources import DiskSpec +from .types.machine_resources import LustreMount from .types.machine_resources import MachineSpec from .types.machine_resources import NfsMount from .types.machine_resources import PersistentDiskSpec @@ -1775,6 +1776,7 @@ def _get_version(dependency_name): "LlmUtilityServiceClient", "LogprobsResult", "LookupStudyRequest", + "LustreMount", "MachineSpec", "ManualBatchTuningParameters", "MatchServiceClient", diff --git a/google/cloud/aiplatform_v1/types/__init__.py b/google/cloud/aiplatform_v1/types/__init__.py index a52a91a2a7..0bfadf9248 100644 --- a/google/cloud/aiplatform_v1/types/__init__.py +++ b/google/cloud/aiplatform_v1/types/__init__.py @@ -630,6 +630,7 @@ BatchDedicatedResources, DedicatedResources, DiskSpec, + LustreMount, MachineSpec, NfsMount, PersistentDiskSpec, @@ -1718,6 +1719,7 @@ "BatchDedicatedResources", "DedicatedResources", "DiskSpec", + "LustreMount", "MachineSpec", "NfsMount", "PersistentDiskSpec", diff --git a/google/cloud/aiplatform_v1/types/custom_job.py b/google/cloud/aiplatform_v1/types/custom_job.py index cb70c2ded7..828af16172 100644 --- a/google/cloud/aiplatform_v1/types/custom_job.py +++ b/google/cloud/aiplatform_v1/types/custom_job.py @@ -420,6 +420,8 @@ class WorkerPoolSpec(proto.Message): use for this worker pool. nfs_mounts (MutableSequence[google.cloud.aiplatform_v1.types.NfsMount]): Optional. List of NFS mount spec. + lustre_mounts (MutableSequence[google.cloud.aiplatform_v1.types.LustreMount]): + Optional. List of Lustre mounts. disk_spec (google.cloud.aiplatform_v1.types.DiskSpec): Disk spec. 
""" @@ -450,6 +452,11 @@ class WorkerPoolSpec(proto.Message): number=4, message=machine_resources.NfsMount, ) + lustre_mounts: MutableSequence[machine_resources.LustreMount] = proto.RepeatedField( + proto.MESSAGE, + number=9, + message=machine_resources.LustreMount, + ) disk_spec: machine_resources.DiskSpec = proto.Field( proto.MESSAGE, number=5, diff --git a/google/cloud/aiplatform_v1/types/machine_resources.py b/google/cloud/aiplatform_v1/types/machine_resources.py index 470b974e10..de5671cff3 100644 --- a/google/cloud/aiplatform_v1/types/machine_resources.py +++ b/google/cloud/aiplatform_v1/types/machine_resources.py @@ -36,6 +36,7 @@ "DiskSpec", "PersistentDiskSpec", "NfsMount", + "LustreMount", "AutoscalingMetricSpec", "ShieldedVmConfig", }, @@ -125,33 +126,32 @@ class MachineSpec(proto.Message): class DedicatedResources(proto.Message): r"""A description of resources that are dedicated to a - DeployedModel, and that need a higher degree of manual - configuration. + DeployedModel or DeployedIndex, and that need a higher degree of + manual configuration. Attributes: machine_spec (google.cloud.aiplatform_v1.types.MachineSpec): Required. Immutable. The specification of a - single machine used by the prediction. + single machine being used. min_replica_count (int): Required. Immutable. The minimum number of - machine replicas this DeployedModel will be - always deployed on. This value must be greater - than or equal to 1. - - If traffic against the DeployedModel increases, - it may dynamically be deployed onto more - replicas, and as traffic decreases, some of - these extra replicas may be freed. + machine replicas that will be always deployed + on. This value must be greater than or equal to + 1. + + If traffic increases, it may dynamically be + deployed onto more replicas, and as traffic + decreases, some of these extra replicas may be + freed. max_replica_count (int): - Immutable. 
The maximum number of replicas this DeployedModel - may be deployed on when the traffic against it increases. If - the requested value is too large, the deployment will error, - but if deployment succeeds then the ability to scale the - model to that many replicas is guaranteed (barring service - outages). If traffic against the DeployedModel increases - beyond what its replicas at maximum may handle, a portion of - the traffic will be dropped. If this value is not provided, - will use + Immutable. The maximum number of replicas that may be + deployed on when the traffic against it increases. If the + requested value is too large, the deployment will error, but + if deployment succeeds then the ability to scale to that + many replicas is guaranteed (barring service outages). If + traffic increases beyond what its replicas at maximum may + handle, a portion of the traffic will be dropped. If this + value is not provided, will use [min_replica_count][google.cloud.aiplatform.v1.DedicatedResources.min_replica_count] as the default value. @@ -163,8 +163,8 @@ class DedicatedResources(proto.Message): required_replica_count (int): Optional. Number of required available replicas for the deployment to succeed. This field is only needed when - partial model deployment/mutation is desired. If set, the - model deploy/mutate operation will succeed once + partial deployment/mutation is desired. If set, the + deploy/mutate operation will succeed once available_replica_count reaches required_replica_count, and the rest of the replicas will be retried. If not set, the default required_replica_count will be min_replica_count. @@ -239,28 +239,27 @@ class AutomaticResources(proto.Message): Attributes: min_replica_count (int): - Immutable. The minimum number of replicas this DeployedModel - will be always deployed on. If traffic against it increases, - it may dynamically be deployed onto more replicas up to + Immutable. The minimum number of replicas that will be + always deployed on. 
If traffic against it increases, it may + dynamically be deployed onto more replicas up to [max_replica_count][google.cloud.aiplatform.v1.AutomaticResources.max_replica_count], and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. max_replica_count (int): Immutable. The maximum number of replicas - this DeployedModel may be deployed on when the - traffic against it increases. If the requested - value is too large, the deployment will error, - but if deployment succeeds then the ability to - scale the model to that many replicas is - guaranteed (barring service outages). If traffic - against the DeployedModel increases beyond what - its replicas at maximum may handle, a portion of - the traffic will be dropped. If this value is - not provided, a no upper bound for scaling under - heavy traffic will be assume, though Vertex AI - may be unable to scale beyond certain replica - number. + that may be deployed on when the traffic against + it increases. If the requested value is too + large, the deployment will error, but if + deployment succeeds then the ability to scale to + that many replicas is guaranteed (barring + service outages). If traffic increases beyond + what its replicas at maximum may handle, a + portion of the traffic will be dropped. If this + value is not provided, no upper bound for + scaling under heavy traffic will be assumed, + though Vertex AI may be unable to scale beyond + certain replica number. """ min_replica_count: int = proto.Field( @@ -331,10 +330,12 @@ class DiskSpec(proto.Message): Attributes: boot_disk_type (str): - Type of the boot disk (default is "pd-ssd"). - Valid values: "pd-ssd" (Persistent Disk Solid - State Drive) or "pd-standard" (Persistent Disk - Hard Disk Drive). + Type of the boot disk. For non-A3U machines, + the default value is "pd-ssd", for A3U machines, + the default value is "hyperdisk-balanced". 
Valid + values: "pd-ssd" (Persistent Disk Solid State + Drive), "pd-standard" (Persistent Disk Hard Disk + Drive) or "hyperdisk-balanced". boot_disk_size_gb (int): Size in GB of the boot disk (default is 100GB). @@ -407,6 +408,40 @@ class NfsMount(proto.Message): ) +class LustreMount(proto.Message): + r"""Represents a mount configuration for Lustre file system. + + Attributes: + instance_ip (str): + Required. IP address of the Lustre instance. + volume_handle (str): + Required. The unique identifier of the Lustre + volume. + filesystem (str): + Required. The name of the Lustre filesystem. + mount_point (str): + Required. Destination mount path. The Lustre file system + will be mounted for the user under /mnt/lustre/ + """ + + instance_ip: str = proto.Field( + proto.STRING, + number=1, + ) + volume_handle: str = proto.Field( + proto.STRING, + number=2, + ) + filesystem: str = proto.Field( + proto.STRING, + number=3, + ) + mount_point: str = proto.Field( + proto.STRING, + number=4, + ) + + class AutoscalingMetricSpec(proto.Message): r"""The metric specification that defines the target resource utilization (CPU utilization, accelerator's duty cycle, and so @@ -419,6 +454,7 @@ class AutoscalingMetricSpec(proto.Message): - For Online Prediction: - ``aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`` - ``aiplatform.googleapis.com/prediction/online/cpu/utilization`` + - ``aiplatform.googleapis.com/prediction/online/request_count`` target (int): The target resource utilization in percentage (1% - 100%) for the given metric; once the real diff --git a/google/cloud/aiplatform_v1beta1/__init__.py b/google/cloud/aiplatform_v1beta1/__init__.py index 9bd968d6da..759601dc07 100644 --- a/google/cloud/aiplatform_v1beta1/__init__.py +++ b/google/cloud/aiplatform_v1beta1/__init__.py @@ -1206,6 +1206,7 @@ from .types.tool import FunctionResponsePart from .types.tool import GoogleMaps from .types.tool import GoogleSearchRetrieval +from .types.tool import PartialArg 
from .types.tool import RagRetrievalConfig from .types.tool import Retrieval from .types.tool import RetrievalConfig @@ -2237,6 +2238,7 @@ def _get_version(dependency_name): "PairwiseSummarizationQualityResult", "PairwiseSummarizationQualitySpec", "Part", + "PartialArg", "PartnerModelTuningSpec", "PauseModelDeploymentMonitoringJobRequest", "PauseScheduleRequest", diff --git a/google/cloud/aiplatform_v1beta1/types/__init__.py b/google/cloud/aiplatform_v1beta1/types/__init__.py index 0063a8d7b1..299cc957ec 100644 --- a/google/cloud/aiplatform_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform_v1beta1/types/__init__.py @@ -1337,6 +1337,7 @@ FunctionResponsePart, GoogleMaps, GoogleSearchRetrieval, + PartialArg, RagRetrievalConfig, Retrieval, RetrievalConfig, @@ -2548,6 +2549,7 @@ "FunctionResponsePart", "GoogleMaps", "GoogleSearchRetrieval", + "PartialArg", "RagRetrievalConfig", "Retrieval", "RetrievalConfig", diff --git a/google/cloud/aiplatform_v1beta1/types/tool.py b/google/cloud/aiplatform_v1beta1/types/tool.py index bc9378c69f..88a3a55bae 100644 --- a/google/cloud/aiplatform_v1beta1/types/tool.py +++ b/google/cloud/aiplatform_v1beta1/types/tool.py @@ -32,6 +32,7 @@ "ToolUseExample", "FunctionDeclaration", "FunctionCall", + "PartialArg", "FunctionResponsePart", "FunctionResponseBlob", "FunctionResponseFileData", @@ -484,12 +485,22 @@ class FunctionCall(proto.Message): the client to execute the ``function_call`` and return the response with the matching ``id``. name (str): - Required. The name of the function to call. Matches + Optional. The name of the function to call. Matches [FunctionDeclaration.name]. args (google.protobuf.struct_pb2.Struct): - Optional. Required. The function parameters and values in - JSON object format. See [FunctionDeclaration.parameters] for - parameter details. + Optional. The function parameters and values in JSON object + format. See [FunctionDeclaration.parameters] for parameter + details. 
+ partial_args (MutableSequence[google.cloud.aiplatform_v1beta1.types.PartialArg]): + Optional. The partial argument value of the + function call. If provided, represents the + arguments/fields that are streamed + incrementally. + will_continue (bool): + Optional. Whether this is not the last part of + the FunctionCall. If true, another partial + message for the current FunctionCall is expected + to follow. """ id: str = proto.Field( @@ -505,6 +516,83 @@ number=2, message=struct_pb2.Struct, ) + partial_args: MutableSequence["PartialArg"] = proto.RepeatedField( + proto.MESSAGE, + number=4, + message="PartialArg", + ) + will_continue: bool = proto.Field( + proto.BOOL, + number=5, + ) + + +class PartialArg(proto.Message): + r"""Partial argument value of the function call. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + null_value (google.protobuf.struct_pb2.NullValue): + Optional. Represents a null value. + + This field is a member of `oneof`_ ``delta``. + number_value (float): + Optional. Represents a double value. + + This field is a member of `oneof`_ ``delta``. + string_value (str): + Optional. Represents a string value. + + This field is a member of `oneof`_ ``delta``. + bool_value (bool): + Optional. Represents a boolean value. + + This field is a member of `oneof`_ ``delta``. + json_path (str): + Required. A JSON Path (RFC 9535) to the argument being + streamed. https://datatracker.ietf.org/doc/html/rfc9535. + e.g. "$.foo.bar[0].data". + will_continue (bool): + Optional. Whether this is not the last part of the same + json_path. If true, another PartialArg message for the + current json_path is expected to follow. 
+ """ + + null_value: struct_pb2.NullValue = proto.Field( + proto.ENUM, + number=2, + oneof="delta", + enum=struct_pb2.NullValue, + ) + number_value: float = proto.Field( + proto.DOUBLE, + number=3, + oneof="delta", + ) + string_value: str = proto.Field( + proto.STRING, + number=4, + oneof="delta", + ) + bool_value: bool = proto.Field( + proto.BOOL, + number=5, + oneof="delta", + ) + json_path: str = proto.Field( + proto.STRING, + number=1, + ) + will_continue: bool = proto.Field( + proto.BOOL, + number=6, + ) class FunctionResponsePart(proto.Message): @@ -1115,6 +1203,11 @@ class FunctionCallingConfig(proto.Message): ANY. Function names should match [FunctionDeclaration.name]. With mode set to ANY, model will predict a function call from the set of function names provided. + stream_function_call_arguments (bool): + Optional. When set to true, arguments of a single function + call will be streamed out in multiple + parts/contents/responses. Partial parameter results will be + returned in the [FunctionCall.partial_args] field. """ class Mode(proto.Enum): @@ -1165,6 +1258,10 @@ class Mode(proto.Enum): proto.STRING, number=2, ) + stream_function_call_arguments: bool = proto.Field( + proto.BOOL, + number=4, + ) class RetrievalConfig(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/vertex_rag_data.py b/google/cloud/aiplatform_v1beta1/types/vertex_rag_data.py index e377e7ddaf..30ec10c8bb 100644 --- a/google/cloud/aiplatform_v1beta1/types/vertex_rag_data.py +++ b/google/cloud/aiplatform_v1beta1/types/vertex_rag_data.py @@ -1546,20 +1546,32 @@ class RagManagedDbConfig(proto.Message): This field is a member of `oneof`_ ``tier``. scaled (google.cloud.aiplatform_v1beta1.types.RagManagedDbConfig.Scaled): - Sets the RagManagedDb to the Scaled tier. - This is the default tier if not explicitly - chosen. + Deprecated: Use ``mode`` instead to set the tier under + Spanner. Sets the RagManagedDb to the Scaled tier. This field is a member of `oneof`_ ``tier``. 
basic (google.cloud.aiplatform_v1beta1.types.RagManagedDbConfig.Basic): - Sets the RagManagedDb to the Basic tier. + Deprecated: Use ``mode`` instead to set the tier under + Spanner. Sets the RagManagedDb to the Basic tier. This field is a member of `oneof`_ ``tier``. unprovisioned (google.cloud.aiplatform_v1beta1.types.RagManagedDbConfig.Unprovisioned): - Sets the RagManagedDb to the Unprovisioned - tier. + Deprecated: Use ``mode`` instead to set the tier under + Spanner. Sets the RagManagedDb to the Unprovisioned tier. This field is a member of `oneof`_ ``tier``. + serverless (google.cloud.aiplatform_v1beta1.types.RagManagedDbConfig.Serverless): + Sets the backend to be the serverless mode + offered by RAG Engine. + + This field is a member of `oneof`_ ``mode``. + spanner (google.cloud.aiplatform_v1beta1.types.RagManagedDbConfig.Spanner): + Sets the RAG Engine backend to be + RagManagedDb, built on top of Spanner. + NOTE: This is the default mode (w/ Basic Tier) + if not explicitly chosen. + + This field is a member of `oneof`_ ``mode``. """ class Enterprise(proto.Message): @@ -1585,7 +1597,8 @@ class Basic(proto.Message): - Latency insensitive workload. - Only using RAG Engine with external vector DBs. - NOTE: This is the default tier if not explicitly chosen. + NOTE: This is the default tier under Spanner mode if not explicitly + chosen. """ @@ -1600,6 +1613,60 @@ class Unprovisioned(proto.Message): """ + class Spanner(proto.Message): + r"""Message to configure the Spanner database used by + RagManagedDb. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + scaled (google.cloud.aiplatform_v1beta1.types.RagManagedDbConfig.Scaled): + Sets the RagManagedDb to the Scaled tier. 
+ + This field is a member of `oneof`_ ``tier``. + basic (google.cloud.aiplatform_v1beta1.types.RagManagedDbConfig.Basic): + Sets the RagManagedDb to the Basic tier. This + is the default tier for Spanner mode if not + explicitly chosen. + + This field is a member of `oneof`_ ``tier``. + unprovisioned (google.cloud.aiplatform_v1beta1.types.RagManagedDbConfig.Unprovisioned): + Sets the RagManagedDb to the Unprovisioned + tier. + + This field is a member of `oneof`_ ``tier``. + """ + + scaled: "RagManagedDbConfig.Scaled" = proto.Field( + proto.MESSAGE, + number=1, + oneof="tier", + message="RagManagedDbConfig.Scaled", + ) + basic: "RagManagedDbConfig.Basic" = proto.Field( + proto.MESSAGE, + number=2, + oneof="tier", + message="RagManagedDbConfig.Basic", + ) + unprovisioned: "RagManagedDbConfig.Unprovisioned" = proto.Field( + proto.MESSAGE, + number=3, + oneof="tier", + message="RagManagedDbConfig.Unprovisioned", + ) + + class Serverless(proto.Message): + r"""Message to configure the serverless mode offered by RAG + Engine. 
+ + """ + enterprise: Enterprise = proto.Field( proto.MESSAGE, number=1, @@ -1624,6 +1691,18 @@ class Unprovisioned(proto.Message): oneof="tier", message=Unprovisioned, ) + serverless: Serverless = proto.Field( + proto.MESSAGE, + number=5, + oneof="mode", + message=Serverless, + ) + spanner: Spanner = proto.Field( + proto.MESSAGE, + number=6, + oneof="mode", + message=Spanner, + ) class RagEngineConfig(proto.Message): diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json index 88ac6c8e7a..35d3487b2b 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.134.0" + "version": "0.0.0" }, "snippets": [ { diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json index 5e7a8e6322..2a9e00ef91 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.134.0" + "version": "0.0.0" }, "snippets": [ { diff --git a/tests/unit/gapic/aiplatform_v1/test_job_service.py b/tests/unit/gapic/aiplatform_v1/test_job_service.py index 88d9de6c10..25c241ef4d 100644 --- a/tests/unit/gapic/aiplatform_v1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_job_service.py @@ -24609,6 +24609,14 @@ def test_create_custom_job_rest_call_success(request_type): "mount_point": "mount_point_value", } ], + "lustre_mounts": [ + { + "instance_ip": "instance_ip_value", + "volume_handle": "volume_handle_value", + "filesystem": "filesystem_value", + "mount_point": 
"mount_point_value", + } + ], "disk_spec": { "boot_disk_type": "boot_disk_type_value", "boot_disk_size_gb": 1792, @@ -26268,6 +26276,14 @@ def test_create_hyperparameter_tuning_job_rest_call_success(request_type): "mount_point": "mount_point_value", } ], + "lustre_mounts": [ + { + "instance_ip": "instance_ip_value", + "volume_handle": "volume_handle_value", + "filesystem": "filesystem_value", + "mount_point": "mount_point_value", + } + ], "disk_spec": { "boot_disk_type": "boot_disk_type_value", "boot_disk_size_gb": 1792, @@ -27155,6 +27171,14 @@ def test_create_nas_job_rest_call_success(request_type): "mount_point": "mount_point_value", } ], + "lustre_mounts": [ + { + "instance_ip": "instance_ip_value", + "volume_handle": "volume_handle_value", + "filesystem": "filesystem_value", + "mount_point": "mount_point_value", + } + ], "disk_spec": { "boot_disk_type": "boot_disk_type_value", "boot_disk_size_gb": 1792, @@ -32047,6 +32071,14 @@ async def test_create_custom_job_rest_asyncio_call_success(request_type): "mount_point": "mount_point_value", } ], + "lustre_mounts": [ + { + "instance_ip": "instance_ip_value", + "volume_handle": "volume_handle_value", + "filesystem": "filesystem_value", + "mount_point": "mount_point_value", + } + ], "disk_spec": { "boot_disk_type": "boot_disk_type_value", "boot_disk_size_gb": 1792, @@ -33871,6 +33903,14 @@ async def test_create_hyperparameter_tuning_job_rest_asyncio_call_success(reques "mount_point": "mount_point_value", } ], + "lustre_mounts": [ + { + "instance_ip": "instance_ip_value", + "volume_handle": "volume_handle_value", + "filesystem": "filesystem_value", + "mount_point": "mount_point_value", + } + ], "disk_spec": { "boot_disk_type": "boot_disk_type_value", "boot_disk_size_gb": 1792, @@ -34856,6 +34896,14 @@ async def test_create_nas_job_rest_asyncio_call_success(request_type): "mount_point": "mount_point_value", } ], + "lustre_mounts": [ + { + "instance_ip": "instance_ip_value", + "volume_handle": "volume_handle_value", + 
"filesystem": "filesystem_value", + "mount_point": "mount_point_value", + } + ], "disk_spec": { "boot_disk_type": "boot_disk_type_value", "boot_disk_size_gb": 1792, diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py index 4ab43d718d..e901f5fc61 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_cache_service.py @@ -4815,6 +4815,17 @@ def test_create_cached_content_rest_call_success(request_type): "id": "id_value", "name": "name_value", "args": {"fields": {}}, + "partial_args": [ + { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "json_path": "json_path_value", + "will_continue": True, + } + ], + "will_continue": True, }, "function_response": { "id": "id_value", @@ -4968,6 +4979,7 @@ def test_create_cached_content_rest_call_success(request_type): "allowed_function_names_value1", "allowed_function_names_value2", ], + "stream_function_call_arguments": True, }, "retrieval_config": { "lat_lng": {"latitude": 0.86, "longitude": 0.971}, @@ -5353,6 +5365,17 @@ def test_update_cached_content_rest_call_success(request_type): "id": "id_value", "name": "name_value", "args": {"fields": {}}, + "partial_args": [ + { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "json_path": "json_path_value", + "will_continue": True, + } + ], + "will_continue": True, }, "function_response": { "id": "id_value", @@ -5506,6 +5529,7 @@ def test_update_cached_content_rest_call_success(request_type): "allowed_function_names_value1", "allowed_function_names_value2", ], + "stream_function_call_arguments": True, }, "retrieval_config": { "lat_lng": {"latitude": 0.86, "longitude": 0.971}, @@ -6753,6 +6777,17 @@ async def test_create_cached_content_rest_asyncio_call_success(request_type): "id": "id_value", "name": 
"name_value", "args": {"fields": {}}, + "partial_args": [ + { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "json_path": "json_path_value", + "will_continue": True, + } + ], + "will_continue": True, }, "function_response": { "id": "id_value", @@ -6906,6 +6941,7 @@ async def test_create_cached_content_rest_asyncio_call_success(request_type): "allowed_function_names_value1", "allowed_function_names_value2", ], + "stream_function_call_arguments": True, }, "retrieval_config": { "lat_lng": {"latitude": 0.86, "longitude": 0.971}, @@ -7327,6 +7363,17 @@ async def test_update_cached_content_rest_asyncio_call_success(request_type): "id": "id_value", "name": "name_value", "args": {"fields": {}}, + "partial_args": [ + { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "json_path": "json_path_value", + "will_continue": True, + } + ], + "will_continue": True, }, "function_response": { "id": "id_value", @@ -7480,6 +7527,7 @@ async def test_update_cached_content_rest_asyncio_call_success(request_type): "allowed_function_names_value1", "allowed_function_names_value2", ], + "stream_function_call_arguments": True, }, "retrieval_config": { "lat_lng": {"latitude": 0.86, "longitude": 0.971}, diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py index fa42930e05..620aeaa91f 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_gen_ai_tuning_service.py @@ -4911,6 +4911,17 @@ def test_create_tuning_job_rest_call_success(request_type): "id": "id_value", "name": "name_value", "args": {"fields": {}}, + "partial_args": [ + { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "json_path": "json_path_value", + "will_continue": True, + } + ], + 
"will_continue": True, }, "function_response": { "id": "id_value", @@ -6676,6 +6687,17 @@ async def test_create_tuning_job_rest_asyncio_call_success(request_type): "id": "id_value", "name": "name_value", "args": {"fields": {}}, + "partial_args": [ + { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "json_path": "json_path_value", + "will_continue": True, + } + ], + "will_continue": True, }, "function_response": { "id": "id_value", diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_session_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_session_service.py index 4bcbe9a0f1..aa91168a9c 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_session_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_session_service.py @@ -6978,6 +6978,17 @@ def test_append_event_rest_call_success(request_type): "id": "id_value", "name": "name_value", "args": {"fields": {}}, + "partial_args": [ + { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "json_path": "json_path_value", + "will_continue": True, + } + ], + "will_continue": True, }, "function_response": { "id": "id_value", @@ -9189,6 +9200,17 @@ async def test_append_event_rest_asyncio_call_success(request_type): "id": "id_value", "name": "name_value", "args": {"fields": {}}, + "partial_args": [ + { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "json_path": "json_path_value", + "will_continue": True, + } + ], + "will_continue": True, }, "function_response": { "id": "id_value", diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py index b4337e95b2..c41d7da44c 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py @@ -10756,6 +10756,8 @@ def 
test_update_rag_engine_config_rest_call_success(request_type): "scaled": {}, "basic": {}, "unprovisioned": {}, + "serverless": {}, + "spanner": {"scaled": {}, "basic": {}, "unprovisioned": {}}, }, } # The version of a generated dependency at test runtime may differ from the version used during generation. @@ -13773,6 +13775,8 @@ async def test_update_rag_engine_config_rest_asyncio_call_success(request_type): "scaled": {}, "basic": {}, "unprovisioned": {}, + "serverless": {}, + "spanner": {"scaled": {}, "basic": {}, "unprovisioned": {}}, }, } # The version of a generated dependency at test runtime may differ from the version used during generation.