Skip to content

Commit

Permalink
feat: [vertexai] update gapic library to the latest version (#11129)
Browse files Browse the repository at this point in the history
PiperOrigin-RevId: 672705511

Co-authored-by: Jaycee Li <jayceeli@google.com>
  • Loading branch information
copybara-service[bot] and jaycee-li committed Sep 10, 2024
1 parent 93d9f6a commit 711cd72
Show file tree
Hide file tree
Showing 66 changed files with 17,208 additions and 783 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -315,7 +315,7 @@ public LlmUtilityServiceStub getStub() {
*
* @param endpoint Required. The name of the Endpoint requested to perform token counting. Format:
* `projects/{project}/locations/{location}/endpoints/{endpoint}`
* @param instances Required. The instances that are the input to token counting call. Schema is
 * @param instances Optional. The instances that are the input to the token counting call. Schema is
* identical to the prediction schema of the underlying model.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
Expand Down Expand Up @@ -351,7 +351,7 @@ public final CountTokensResponse countTokens(EndpointName endpoint, List<Value>
*
* @param endpoint Required. The name of the Endpoint requested to perform token counting. Format:
* `projects/{project}/locations/{location}/endpoints/{endpoint}`
* @param instances Required. The instances that are the input to token counting call. Schema is
 * @param instances Optional. The instances that are the input to the token counting call. Schema is
* identical to the prediction schema of the underlying model.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
*/
Expand Down Expand Up @@ -383,6 +383,8 @@ public final CountTokensResponse countTokens(String endpoint, List<Value> instan
* .setModel("model104069929")
* .addAllInstances(new ArrayList<Value>())
* .addAllContents(new ArrayList<Content>())
* .setSystemInstruction(Content.newBuilder().build())
* .addAllTools(new ArrayList<Tool>())
* .build();
* CountTokensResponse response = llmUtilityServiceClient.countTokens(request);
* }
Expand Down Expand Up @@ -417,6 +419,8 @@ public final CountTokensResponse countTokens(CountTokensRequest request) {
* .setModel("model104069929")
* .addAllInstances(new ArrayList<Value>())
* .addAllContents(new ArrayList<Content>())
* .setSystemInstruction(Content.newBuilder().build())
* .addAllTools(new ArrayList<Tool>())
* .build();
* ApiFuture<CountTokensResponse> future =
* llmUtilityServiceClient.countTokensCallable().futureCall(request);
Expand Down Expand Up @@ -451,7 +455,7 @@ public final UnaryCallable<CountTokensRequest, CountTokensResponse> countTokensC
*
* @param endpoint Required. The name of the Endpoint requested to get lists of tokens and token
* ids.
* @param instances Required. The instances that are the input to token computing API call. Schema
 * @param instances Optional. The instances that are the input to the token computing API call. Schema
* is identical to the prediction schema of the text model, even for the non-text models, like
* chat models, or Codey models.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
Expand Down Expand Up @@ -488,7 +492,7 @@ public final ComputeTokensResponse computeTokens(EndpointName endpoint, List<Val
*
* @param endpoint Required. The name of the Endpoint requested to get lists of tokens and token
* ids.
* @param instances Required. The instances that are the input to token computing API call. Schema
 * @param instances Optional. The instances that are the input to the token computing API call. Schema
* is identical to the prediction schema of the text model, even for the non-text models, like
* chat models, or Codey models.
* @throws com.google.api.gax.rpc.ApiException if the remote call fails
Expand Down Expand Up @@ -519,6 +523,8 @@ public final ComputeTokensResponse computeTokens(String endpoint, List<Value> in
* "[PROJECT]", "[LOCATION]", "[ENDPOINT]")
* .toString())
* .addAllInstances(new ArrayList<Value>())
* .setModel("model104069929")
* .addAllContents(new ArrayList<Content>())
* .build();
* ComputeTokensResponse response = llmUtilityServiceClient.computeTokens(request);
* }
Expand Down Expand Up @@ -551,6 +557,8 @@ public final ComputeTokensResponse computeTokens(ComputeTokensRequest request) {
* "[PROJECT]", "[LOCATION]", "[ENDPOINT]")
* .toString())
* .addAllInstances(new ArrayList<Value>())
* .setModel("model104069929")
* .addAllContents(new ArrayList<Content>())
* .build();
* ApiFuture<ComputeTokensResponse> future =
* llmUtilityServiceClient.computeTokensCallable().futureCall(request);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1378,8 +1378,12 @@ public final UnaryCallable<ExplainRequest, ExplainResponse> explainCallable() {
* }
* }</pre>
*
* @param model Required. The name of the publisher model requested to serve the prediction.
* Format: `projects/{project}/locations/{location}/publishers/&#42;/models/&#42;`
* @param model Required. The fully qualified name of the publisher model or tuned model endpoint
* to use.
* <p>Publisher model format:
* `projects/{project}/locations/{location}/publishers/&#42;/models/&#42;`
* <p>Tuned model endpoint format:
* `projects/{project}/locations/{location}/endpoints/{endpoint}`
* @param contents Required. The content of the current conversation with the model.
* <p>For single-turn queries, this is a single instance. For multi-turn queries, this is a
* repeated field that contains conversation history + latest request.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
import com.google.api.core.ApiFunction;
import com.google.api.core.ApiFuture;
import com.google.api.core.BetaApi;
import com.google.api.core.ObsoleteApi;
import com.google.api.gax.core.GaxProperties;
import com.google.api.gax.core.GoogleCredentialsProvider;
import com.google.api.gax.core.InstantiatingExecutorProvider;
Expand Down Expand Up @@ -402,6 +403,7 @@ public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuild
}

/** Returns the default service endpoint. */
@ObsoleteApi("Use getEndpoint() instead")
public static String getDefaultEndpoint() {
return "aiplatform.googleapis.com:443";
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -385,24 +385,12 @@ protected GrpcPredictionServiceStub(
streamDirectPredictTransportSettings =
GrpcCallSettings.<StreamDirectPredictRequest, StreamDirectPredictResponse>newBuilder()
.setMethodDescriptor(streamDirectPredictMethodDescriptor)
.setParamsExtractor(
request -> {
RequestParamsBuilder builder = RequestParamsBuilder.create();
builder.add("endpoint", String.valueOf(request.getEndpoint()));
return builder.build();
})
.build();
GrpcCallSettings<StreamDirectRawPredictRequest, StreamDirectRawPredictResponse>
streamDirectRawPredictTransportSettings =
GrpcCallSettings
.<StreamDirectRawPredictRequest, StreamDirectRawPredictResponse>newBuilder()
.setMethodDescriptor(streamDirectRawPredictMethodDescriptor)
.setParamsExtractor(
request -> {
RequestParamsBuilder builder = RequestParamsBuilder.create();
builder.add("endpoint", String.valueOf(request.getEndpoint()));
return builder.build();
})
.build();
GrpcCallSettings<StreamingPredictRequest, StreamingPredictResponse>
streamingPredictTransportSettings =
Expand Down
Loading

0 comments on commit 711cd72

Please sign in to comment.