// googleapis/google/cloud/visualinspection/v1beta1/annotation.proto
//
// 180 lines
// 6.8 KiB
// Protocol Buffer

// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.cloud.visualinspection.v1beta1;
import "google/api/annotations.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/visualinspection/v1beta1/geometry.proto";
import "google/cloud/visualinspection/v1beta1/image.proto";
import "google/protobuf/timestamp.proto";
option csharp_namespace = "Google.Cloud.VisualInspection.V1Beta1";
option php_namespace = "Google\\Cloud\\VisualInspection\\V1beta1";
option ruby_package = "Google::Cloud::VisualInspection::V1beta1";
option go_package = "google.golang.org/genproto/googleapis/cloud/visualinspection/v1beta1;visualinspection";
option java_multiple_files = true;
option java_package = "com.google.cloud.visualinspection.v1beta1";
// Annotation assigns a specific AnnotationSpec to a particular Image.
message Annotation {
  option (google.api.resource) = {
    type: "visualinspection.googleapis.com/Annotation"
    pattern: "projects/{project}/locations/{location}/datasets/{dataset}/images/{image}/annotations/{annotation}"
  };

  // Describes where an Annotation came from.
  message Source {
    // The kind of producer that created the Annotation.
    enum SourceType {
      // Unspecified source type.
      SOURCE_TYPE_UNSPECIFIED = 0;

      // The Annotation was generated by a human user.
      HUMAN_PRODUCED = 1;

      // The Annotation was generated by a ML model.
      MACHINE_PRODUCED = 2;

      // The Annotation was generated by machine and has a child Annotation
      // with HUMAN_PRODUCED.
      HUMAN_LABELED = 3;
    }

    // Source type.
    SourceType type = 1;

    // Resource name of the source model when the Annotation is
    // MACHINE_PRODUCED.
    string source_model = 2 [(google.api.resource_reference) = {
      type: "visualinspection.googleapis.com/Model"
    }];
  }

  // The payload of the Annotation; exactly one of these may be set.
  oneof annotation_type {
    // A BoundingBox Annotation.
    BoundingBoxAnnotation box = 8;

    // A BoundingPoly Annotation.
    BoundingPolyAnnotation polygon = 9;

    // A Classification Label Annotation.
    ClassificationLabelAnnotation classification_label = 10;

    // A Mask Annotation.
    MaskAnnotation mask = 11;

    // A Polyline Annotation.
    PolylineAnnotation polyline = 14;
  }

  // Output only. Resource name for the Annotation, generated by the system.
  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Timestamp when this Annotation was created.
  google.protobuf.Timestamp create_time = 2
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Timestamp when this Annotation was last updated.
  google.protobuf.Timestamp update_time = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // The resource ID of the AnnotationSpec that this Annotation pertains to.
  // The AnnotationSpec must be in the same ancestor Dataset.
  // The field may be empty for a MaskAnnotation.
  string annotation_spec_id = 4;

  // Required. The resource ID of the AnnotationSet that this Annotation
  // belongs to.
  string annotation_set_id = 5 [(google.api.field_behavior) = REQUIRED];

  // The resource ID of the parent Annotation. A parent annotation represents
  // a region in the original Image.
  string parent_annotation_id = 7;

  // Output only. Source of the Annotation.
  Source source = 12 [(google.api.field_behavior) = OUTPUT_ONLY];

  // The labels with user-defined metadata to organize your Annotations.
  //
  // Label keys and values can be no longer than 64 characters
  // (Unicode codepoints), can only contain lowercase letters, numeric
  // characters, underscores and dashes. International characters are allowed.
  // Label keys must start with a letter.
  //
  // See https://goo.gl/xmQnxf for more information on and examples of labels.
  map<string, string> labels = 13;
}
// Detailed information for a BoundingBox Annotation.
message BoundingBoxAnnotation {
  // A box marking a rectangular region on the image. The box is expected to
  // have exactly four vertices in clockwise order, expressed in normalized
  // coordinates.
  NormalizedBoundingPoly normalized_bounding_box = 1;

  // Output only. A confidence score between 0.0 and 1.0; higher values mean
  // greater confidence about the annotation.
  float confidence_score = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Detailed information for a BoundingPoly Annotation.
message BoundingPolyAnnotation {
  // A polygon marking a region on the image. The polygon is expected to have
  // N vertices in clockwise order, expressed in normalized coordinates.
  NormalizedBoundingPoly normalized_bounding_poly = 1;

  // Output only. A confidence score between 0.0 and 1.0; higher values mean
  // greater confidence that the annotation is positive.
  float confidence_score = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Detailed information for a ClassificationLabel Annotation.
message ClassificationLabelAnnotation {
  // Output only. A confidence score between 0.0 and 1.0; higher values mean
  // greater confidence that the annotation is positive.
  float confidence_score = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
}
// Detailed information for a Mask Annotation.
message MaskAnnotation {
  // Output only. A one-channel image encoded as an 8-bit lossless PNG, with
  // the same size as the original image. For a given pixel, a darker color
  // means less confidence in the correctness of the category in the
  // category_mask for the corresponding pixel. Black means no confidence and
  // white means full confidence.
  Image confidence_mask = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

  // A three-channel image encoded as an 8-bit lossless PNG. Each pixel in the
  // image mask represents the category that the corresponding pixel in the
  // original image belongs to. Each color is mapped to one AnnotationSpec
  // based on annotation_spec_colors.
  Image category_mask = 2;
}
// Detailed information for a Polyline Annotation.
message PolylineAnnotation {
  // A polyline made up of connected straight-line segments.
  NormalizedPolyline normalized_polyline = 1;

  // Output only. A confidence score between 0.0 and 1.0; higher values mean
  // greater confidence that the annotation is positive.
  float confidence_score = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
}