// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.ml.v1;

import "google/api/annotations.proto";
import "google/api/httpbody.proto";

option go_package = "google.golang.org/genproto/googleapis/cloud/ml/v1;ml";
option java_multiple_files = true;
option java_outer_classname = "PredictionServiceProto";
option java_package = "com.google.cloud.ml.api.v1";

// Copyright 2017 Google Inc. All Rights Reserved.
//
// Proto file for the Google Cloud Machine Learning Engine.
// Describes the online prediction service.

// The Prediction API, which serves predictions for models managed by
// ModelService.
service OnlinePredictionService {
  // Performs prediction on the data in the request.
  //
  // **** REMOVE FROM GENERATED DOCUMENTATION
  rpc Predict(PredictRequest) returns (google.api.HttpBody) {
    option (google.api.http) = {
      post: "/v1/{name=projects/**}:predict"
      body: "*"
    };
  }
}

// Request for predictions to be issued against a trained model.
//
// The body of the request is a single JSON object with a single top-level
// field, `instances`: a JSON array of values representing the instances to
// predict on. The structure of each element of the array is determined by
// your model's input definition. Here are some examples of request bodies:
//
// CSV data with each row encoded as a string value:
//
// {"instances": ["1.0,true,\\"x\\"", "-2.0,false,\\"y\\""]} //// Plain text: //
// {"instances": ["the quick brown fox", "la bruja le dio"]} //// Sentences encoded as lists of words (vectors of strings): //
// { // "instances": [ // ["the","quick","brown"], // ["la","bruja","le"], // ... // ] // } //// Floating point scalar values: //
// {"instances": [0.0, 1.1, 2.2]} //// Vectors of integers: //
// { // "instances": [ // [0, 1, 2], // [3, 4, 5], // ... // ] // } //// Tensors (in this case, two-dimensional tensors): //
// { // "instances": [ // [ // [0, 1, 2], // [3, 4, 5] // ], // ... // ] // } //// Images can be represented different ways. In this encoding scheme the first // two dimensions represent the rows and columns of the image, and the third // contains lists (vectors) of the R, G, and B values for each pixel. //
// { // "instances": [ // [ // [ // [138, 30, 66], // [130, 20, 56], // ... // ], // [ // [126, 38, 61], // [122, 24, 57], // ... // ], // ... // ], // ... // ] // } //// JSON strings must be encoded as UTF-8. To send binary data, you must // base64-encode the data and mark it as binary. To mark a JSON string // as binary, replace it with a JSON object with a single attribute named `b64`: //
{"b64": "..."}// For example: // // Two Serialized tf.Examples (fake data, for illustrative purposes only): //
// {"instances": [{"b64": "X5ad6u"}, {"b64": "IA9j4nx"}]} //// Two JPEG image byte strings (fake data, for illustrative purposes only): //
// {"instances": [{"b64": "ASa8asdf"}, {"b64": "JLK7ljk3"}]} //// If your data includes named references, format each instance as a JSON object // with the named references as the keys: // // JSON input data to be preprocessed: //
// { // "instances": [ // { // "a": 1.0, // "b": true, // "c": "x" // }, // { // "a": -2.0, // "b": false, // "c": "y" // } // ] // } //// Some models have an underlying TensorFlow graph that accepts multiple input // tensors. In this case, you should use the names of JSON name/value pairs to // identify the input tensors, as shown in the following exmaples: // // For a graph with input tensor aliases "tag" (string) and "image" // (base64-encoded string): //
// { // "instances": [ // { // "tag": "beach", // "image": {"b64": "ASa8asdf"} // }, // { // "tag": "car", // "image": {"b64": "JLK7ljk3"} // } // ] // } //// For a graph with input tensor aliases "tag" (string) and "image" // (3-dimensional array of 8-bit ints): //
// { // "instances": [ // { // "tag": "beach", // "image": [ // [ // [138, 30, 66], // [130, 20, 56], // ... // ], // [ // [126, 38, 61], // [122, 24, 57], // ... // ], // ... // ] // }, // { // "tag": "car", // "image": [ // [ // [255, 0, 102], // [255, 0, 97], // ... // ], // [ // [254, 1, 101], // [254, 2, 93], // ... // ], // ... // ] // }, // ... // ] // } //// If the call is successful, the response body will contain one prediction // entry per instance in the request body. If prediction fails for any // instance, the response body will contain no predictions and will contian // a single error entry instead. message PredictRequest { // Required. The resource name of a model or a version. // // Authorization: requires `Viewer` role on the parent project. string name = 1; // // Required. The prediction request body. google.api.HttpBody http_body = 2; }