// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.ai.generativelanguage.v1;

import "google/api/field_behavior.proto";
import "google/api/resource.proto";

option go_package = "cloud.google.com/go/ai/generativelanguage/apiv1/generativelanguagepb;generativelanguagepb";
option java_multiple_files = true;
option java_outer_classname = "ModelProto";
option java_package = "com.google.ai.generativelanguage.v1";

// Information about a Generative Language Model.
message Model {
  option (google.api.resource) = {
    type: "generativelanguage.googleapis.com/Model"
    pattern: "models/{model}"
  };

  // Required. The resource name of the `Model`. Refer to [Model
  // variants](https://ai.google.dev/gemini-api/docs/models/gemini#model-variations)
  // for all allowed values.
  //
  // Format: `models/{model}` with a `{model}` naming convention of:
  //
  // * "{base_model_id}-{version}"
  //
  // Examples:
  //
  // * `models/gemini-1.5-flash-001`
  string name = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. The name of the base model; pass this to the generation
  // request.
  //
  // Examples:
  //
  // * `gemini-1.5-flash`
  string base_model_id = 2 [(google.api.field_behavior) = REQUIRED];

  // Required. The version number of the model.
  //
  // This represents the major version (`1.0` or `1.5`).
  string version = 3 [(google.api.field_behavior) = REQUIRED];

  // The human-readable name of the model. E.g. "Gemini 1.5 Flash".
  //
  // The name can be up to 128 characters long and can consist of any UTF-8
  // characters.
  string display_name = 4;

  // A short description of the model.
  string description = 5;

  // Maximum number of input tokens allowed for this model.
  int32 input_token_limit = 6;

  // Maximum number of output tokens available for this model.
  int32 output_token_limit = 7;

  // The model's supported generation methods.
  //
  // The corresponding API method names are defined as camel case
  // strings, such as `generateMessage` and `generateContent`.
  repeated string supported_generation_methods = 8;

  // Controls the randomness of the output.
  //
  // Values can range over `[0.0, max_temperature]`, inclusive. A higher value
  // produces responses that are more varied, while a value closer to `0.0`
  // typically results in less surprising responses from the model.
  // This value specifies the default used by the backend when calling the
  // model.
  optional float temperature = 9;

  // The maximum temperature this model can use.
  optional float max_temperature = 13;

  // For [Nucleus
  // sampling](https://ai.google.dev/gemini-api/docs/prompting-strategies#top-p).
  //
  // Nucleus sampling considers the smallest set of tokens whose probability
  // sum is at least `top_p`.
  // This value specifies the default used by the backend when calling the
  // model.
  optional float top_p = 10;

  // For Top-k sampling.
  //
  // Top-k sampling considers the set of `top_k` most probable tokens.
  // This value specifies the default used by the backend when calling the
  // model.
  // If empty, indicates the model doesn't use top-k sampling, and `top_k`
  // isn't allowed as a generation parameter.
  optional int32 top_k = 11;
}
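
// Illustrative only, not part of the schema: a populated `Model` resource
// might look roughly like the sketch below in proto text format. The name,
// base model, version, and display name follow the examples given in the
// field comments above; the description, token limits, generation methods,
// and sampling defaults are placeholder assumptions, not values documented
// in this file.
//
//   name: "models/gemini-1.5-flash-001"
//   base_model_id: "gemini-1.5-flash"
//   version: "1.5"                       # per the major-version note above
//   display_name: "Gemini 1.5 Flash"
//   description: "A fast multimodal model"      # placeholder
//   input_token_limit: 1000000                  # placeholder
//   output_token_limit: 8192                    # placeholder
//   supported_generation_methods: "generateContent"   # example method name
//   supported_generation_methods: "generateMessage"   # example method name
//   temperature: 1.0                             # placeholder default
//   max_temperature: 2.0                         # placeholder
//   top_p: 0.95                                  # placeholder default
//   top_k: 64                                    # placeholder default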