// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.recaptchaenterprise.v1beta1;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/protobuf/timestamp.proto";

option csharp_namespace = "Google.Cloud.RecaptchaEnterprise.V1Beta1";
option go_package = "google.golang.org/genproto/googleapis/cloud/recaptchaenterprise/v1beta1;recaptchaenterprise";
option java_multiple_files = true;
option java_outer_classname = "RecaptchaEnterpriseProto";
option java_package = "com.google.recaptchaenterprise.v1beta1";
option objc_class_prefix = "GCRE";
option php_namespace = "Google\\Cloud\\RecaptchaEnterprise\\V1beta1";
option ruby_package = "Google::Cloud::RecaptchaEnterprise::V1beta1";

// Service to determine the likelihood an event is legitimate.
service RecaptchaEnterpriseServiceV1Beta1 {
  option (google.api.default_host) = "recaptchaenterprise.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/cloud-platform";

  // Creates an Assessment of the likelihood an event is legitimate.
  rpc CreateAssessment(CreateAssessmentRequest) returns (Assessment) {
    option (google.api.http) = {
      post: "/v1beta1/{parent=projects/*}/assessments"
      body: "assessment"
    };
    option (google.api.method_signature) = "parent,assessment";
  }

  // Annotates a previously created Assessment to provide additional
  // information on whether the event turned out to be authentic or
  // fraudulent.
  rpc AnnotateAssessment(AnnotateAssessmentRequest)
      returns (AnnotateAssessmentResponse) {
    option (google.api.http) = {
      post: "/v1beta1/{name=projects/*/assessments/*}:annotate"
      body: "*"
    };
    option (google.api.method_signature) = "name,annotation";
  }
}

// The create assessment request message.
message CreateAssessmentRequest {
  // Required. The name of the project in which the assessment will be created,
  // in the format "projects/{project_number}".
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "cloudresourcemanager.googleapis.com/Project"
    }
  ];

  // Required. The assessment details.
  Assessment assessment = 2 [(google.api.field_behavior) = REQUIRED];
}

// The request message to annotate an Assessment.
message AnnotateAssessmentRequest {
  // Enum that represents the types of annotations.
  enum Annotation {
    // Default unspecified type.
    ANNOTATION_UNSPECIFIED = 0;

    // Provides information that the event turned out to be legitimate.
    LEGITIMATE = 1;

    // Provides information that the event turned out to be fraudulent.
    FRAUDULENT = 2;

    // Provides information that the event was related to a login event in
    // which the user typed the correct password. Deprecated, prefer indicating
    // CORRECT_PASSWORD through the reasons field instead.
    PASSWORD_CORRECT = 3 [deprecated = true];

    // Provides information that the event was related to a login event in
    // which the user typed the incorrect password.
    // Deprecated, prefer indicating INCORRECT_PASSWORD through the reasons
    // field instead.
    PASSWORD_INCORRECT = 4 [deprecated = true];
  }

  // Enum that represents potential reasons for annotating an assessment.
  enum Reason {
    // Default unspecified reason.
    REASON_UNSPECIFIED = 0;

    // Indicates a chargeback issued for the transaction with no other details.
    // When possible, specify the type by using CHARGEBACK_FRAUD or
    // CHARGEBACK_DISPUTE instead.
    CHARGEBACK = 1;

    // Indicates a chargeback related to an alleged unauthorized transaction
    // from the cardholder's perspective (for example, the card number was
    // stolen).
    CHARGEBACK_FRAUD = 8;

    // Indicates a chargeback related to the cardholder having provided their
    // card details but allegedly not being satisfied with the purchase
    // (for example, misrepresentation, attempted cancellation).
    CHARGEBACK_DISPUTE = 9;

    // Indicates the transaction associated with the assessment is suspected of
    // being fraudulent based on the payment method, billing details, shipping
    // address or other transaction information.
    PAYMENT_HEURISTICS = 2;

    // Indicates that the user was served a 2FA challenge. An old assessment
    // with the `INITIATED_TWO_FACTOR` reason that has not been overwritten
    // with `PASSED_TWO_FACTOR` is treated as an abandoned 2FA flow. This is
    // equivalent to `FAILED_TWO_FACTOR`.
    INITIATED_TWO_FACTOR = 7;

    // Indicates that the user passed a 2FA challenge.
    PASSED_TWO_FACTOR = 3;

    // Indicates that the user failed a 2FA challenge.
    FAILED_TWO_FACTOR = 4;

    // Indicates the user provided the correct password.
    CORRECT_PASSWORD = 5;

    // Indicates the user provided an incorrect password.
    INCORRECT_PASSWORD = 6;
  }

  // Required. The resource name of the Assessment, in the format
  // "projects/{project_number}/assessments/{assessment_id}".
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "recaptchaenterprise.googleapis.com/Assessment"
    }
  ];

  // Optional. The annotation that will be assigned to the Event. This field
  // can be left empty to provide reasons that apply to an event without
  // concluding whether the event is legitimate or fraudulent.
  Annotation annotation = 2 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Optional reasons for the annotation that will be assigned to the
  // Event.
  repeated Reason reasons = 3 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Optional unique stable hashed user identifier to apply to the
  // assessment. This is an alternative to setting the hashed_account_id in
  // CreateAssessment, for example when the account identifier is not yet known
  // in the initial request. It is recommended that the identifier is hashed
  // using hmac-sha256 with a stable secret.
  bytes hashed_account_id = 4 [(google.api.field_behavior) = OPTIONAL];
}

// Empty response for AnnotateAssessment.
message AnnotateAssessmentResponse {

}

// Password leak verification info.
message PasswordLeakVerification {
  // Optional. Scrypt hash of the username+password that the customer wants to
  // verify against a known password leak.
  bytes hashed_user_credentials = 1 [(google.api.field_behavior) = OPTIONAL];

  // Output only. Whether or not the user's credentials are present in a known
  // leak.
  bool credentials_leaked = 2 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Optional. The username part of the user credentials for which we want to
  // trigger a leak check in canonicalized form. This is the same data used to
  // create the hashed_user_credentials on the customer side.
  string canonicalized_username = 3 [(google.api.field_behavior) = OPTIONAL];
}

// A reCAPTCHA assessment resource.
message Assessment {
  option (google.api.resource) = {
    type: "recaptchaenterprise.googleapis.com/Assessment"
    pattern: "projects/{project}/assessments/{assessment}"
  };

  // Reasons contributing to the risk analysis verdict.
  enum ClassificationReason {
    // Default unspecified type.
    CLASSIFICATION_REASON_UNSPECIFIED = 0;

    // Interactions matched the behavior of an automated agent.
    AUTOMATION = 1;

    // The event originated from an illegitimate environment.
    UNEXPECTED_ENVIRONMENT = 2;

    // Traffic volume from the event source is higher than normal.
    TOO_MUCH_TRAFFIC = 3;

    // Interactions with the site were significantly different than expected
    // patterns.
    UNEXPECTED_USAGE_PATTERNS = 4;

    // Too little traffic has been received from this site thus far to generate
    // quality risk analysis.
    LOW_CONFIDENCE_SCORE = 5;
  }

  // Output only. The resource name for the Assessment in the format
  // "projects/{project_number}/assessments/{assessment_id}".
  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

  // The event being assessed.
  Event event = 2;

  // Output only. Legitimate event score from 0.0 to 1.0.
  // (1.0 means very likely legitimate traffic while 0.0 means very likely
  // non-legitimate traffic).
  float score = 3 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Properties of the provided event token.
  TokenProperties token_properties = 4
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Reasons contributing to the risk analysis verdict.
  repeated ClassificationReason reasons = 5
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Information about the user's credentials used to check for leaks.
  // This feature is part of the Early Access Program (EAP). Exercise caution,
  // and do not deploy integrations based on this feature in a production
  // environment.
  PasswordLeakVerification password_leak_verification = 7;

  // Assessment returned by Account Defender when a hashed_account_id is
  // provided.
  AccountDefenderAssessment account_defender_assessment = 8;
}

// The event being assessed.
message Event {
  // Optional. The user response token provided by the reCAPTCHA client-side
  // integration on your site.
  string token = 1 [(google.api.field_behavior) = OPTIONAL];

  // Optional. The site key that was used to invoke reCAPTCHA on your site and
  // generate the token.
  string site_key = 2 [(google.api.field_behavior) = OPTIONAL];

  // Optional. The user agent present in the request from the user's device
  // related to this event.
  string user_agent = 3 [(google.api.field_behavior) = OPTIONAL];

  // Optional. The IP address in the request from the user's device related to
  // this event.
  string user_ip_address = 4 [(google.api.field_behavior) = OPTIONAL];

  // Optional. The expected action for this type of event. This should be the
  // same action provided at token generation time on client-side platforms
  // already integrated with reCAPTCHA Enterprise.
  string expected_action = 5 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Optional unique stable hashed user identifier for the request.
  // The identifier should ideally be hashed using sha256 with a stable secret.
  bytes hashed_account_id = 6 [(google.api.field_behavior) = OPTIONAL];
}

// Properties of the provided event token.
message TokenProperties {
  // Enum that represents the types of invalid token reasons.
  enum InvalidReason {
    // Default unspecified type.
    INVALID_REASON_UNSPECIFIED = 0;

    // If the failure reason was not accounted for.
    UNKNOWN_INVALID_REASON = 1;

    // The provided user verification token was malformed.
    MALFORMED = 2;

    // The user verification token had expired.
    EXPIRED = 3;

    // The user verification had already been seen.
    DUPE = 4;

    // The user verification token did not match the provided site key.
    // This may be a configuration error (e.g. development keys used in
    // production) or end users trying to use verification tokens from other
    // sites.
    SITE_MISMATCH = 5 [deprecated = true];

    // The user verification token was not present. It is a required input.
    MISSING = 6;

    // A retriable error (such as network failure) occurred on the browser.
    // Could easily be simulated by an attacker.
    BROWSER_ERROR = 7;
  }

  // Whether the provided user response token is valid. When valid = false, the
  // reason could be specified in invalid_reason or it could also be due to
  // a user failing to solve a challenge or a sitekey mismatch (i.e. the
  // sitekey used to generate the token was different than the one specified in
  // the assessment).
  bool valid = 1;

  // Reason associated with the response when valid = false.
  InvalidReason invalid_reason = 2;

  // The timestamp corresponding to the generation of the token.
  google.protobuf.Timestamp create_time = 3;

  // The hostname of the page on which the token was generated.
  string hostname = 4;

  // Action name provided at token generation.
  string action = 5;
}

// Account Defender risk assessment.
message AccountDefenderAssessment {
  // Labels returned by Account Defender for this request.
  enum AccountDefenderLabel {
    // Default unspecified type.
    ACCOUNT_DEFENDER_LABEL_UNSPECIFIED = 0;

    // The request matches a known good profile for the user.
    PROFILE_MATCH = 1;

    // The request is potentially a suspicious login event and should be
    // further verified either via multi-factor authentication or another
    // system.
    SUSPICIOUS_LOGIN_ACTIVITY = 2;

    // The request matched a profile that previously had suspicious account
    // creation behavior. This could mean this is a fake account.
    SUSPICIOUS_ACCOUNT_CREATION = 3;

    // The account in the request has a high number of related accounts. It
    // does not necessarily imply that the account is bad but could require
    // investigating.
    RELATED_ACCOUNTS_NUMBER_HIGH = 4;
  }

  // Labels for this request.
  repeated AccountDefenderLabel labels = 1;
}
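
// Illustrative sketch (not part of the API surface): the hashed_account_id
// fields above recommend hashing a stable account identifier with
// HMAC-SHA256 and a stable secret. A minimal client-side computation in Go
// might look like the following; hashAccountID, userID, and the secret value
// are placeholder names chosen for this example, not defined by this API.
//
//   package main
//
//   import (
//       "crypto/hmac"
//       "crypto/sha256"
//       "fmt"
//   )
//
//   // hashAccountID returns the HMAC-SHA256 digest of a stable user
//   // identifier, suitable for populating a bytes hashed_account_id field.
//   func hashAccountID(userID string, secret []byte) []byte {
//       mac := hmac.New(sha256.New, secret)
//       mac.Write([]byte(userID))
//       return mac.Sum(nil)
//   }
//
//   func main() {
//       digest := hashAccountID("user-123@example.com", []byte("stable-secret"))
//       fmt.Printf("%x\n", digest) // hex-encoded 32-byte digest
//   }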