diff --git a/.temporal-api/dependencies/gogoproto/gogo.proto b/.temporal-api/dependencies/gogoproto/gogo.proto new file mode 100644 index 0000000..49837cc --- /dev/null +++ b/.temporal-api/dependencies/gogoproto/gogo.proto @@ -0,0 +1,141 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/temporalio/gogo-protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool 
goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 64035; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; + optional bool wktpointer = 65012; +} diff --git a/.temporal-api/google/api/annotations.proto b/.temporal-api/google/api/annotations.proto new file mode 100644 index 0000000..efdab3d --- /dev/null +++ b/.temporal-api/google/api/annotations.proto @@ -0,0 +1,31 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/api/http.proto"; +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "AnnotationsProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.MethodOptions { + // See `HttpRule`. + HttpRule http = 72295728; +} diff --git a/.temporal-api/google/api/http.proto b/.temporal-api/google/api/http.proto new file mode 100644 index 0000000..31d867a --- /dev/null +++ b/.temporal-api/google/api/http.proto @@ -0,0 +1,379 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.api; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "HttpProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +message Http { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + repeated HttpRule rules = 1; + + // When set to true, URL path parameters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. + bool fully_decode_reserved_expansion = 2; +} + +// # gRPC Transcoding +// +// gRPC Transcoding is a feature for mapping between a gRPC method and one or +// more HTTP REST endpoints. It allows developers to build a single API service +// that supports both gRPC APIs and REST APIs. Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature +// and use it for large scale production services. +// +// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +// how different portions of the gRPC request message are mapped to the URL +// path, URL query parameters, and HTTP request body. It also controls how the +// gRPC response message is mapped to the HTTP response body. 
`HttpRule` is +// typically specified as an `google.api.http` annotation on the gRPC method. +// +// Each mapping specifies a URL path template and an HTTP method. The path +// template may refer to one or more fields in the gRPC request message, as long +// as each field is a non-repeated field with a primitive (non-message) type. +// The path template controls how fields of the request message are mapped to +// the URL path. +// +// Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. +// } +// +// This enables an HTTP REST to gRPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// +// Any fields in the request message which are not bound by the path template +// automatically become HTTP query parameters if there is no HTTP request body. +// For example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. +// int64 revision = 2; // Mapped to URL query parameter `revision`. +// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. +// } +// +// This enables a HTTP JSON to RPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | +// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: +// "foo"))` +// +// Note that fields which are mapped to URL query parameters must have a +// primitive type or a repeated primitive type or a non-repeated message type. 
+// In the case of a repeated type, the parameter can be repeated in the URL +// as `...?param=A&param=B`. In the case of a message type, each field of the +// message is mapped to a separate parameter, such as +// `...?foo.a=A&foo.b=B&foo.c=C`. +// +// For HTTP methods that allow a request body, the `body` field +// specifies the mapping. Consider a REST update method on the +// message resource collection: +// +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. This enables the following alternative definition of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// +// The following HTTP JSON to RPC mapping is enabled: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. 
This makes this option more rarely used in practice when +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// This enables the following two alternative HTTP JSON to RPC mappings: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: +// "123456")` +// +// ## Rules for HTTP mapping +// +// 1. Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. They are passed via the URL path. +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They +// are passed via the HTTP +// request body. +// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. +// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL +// query parameter, all fields +// are passed via URL path and HTTP request body. +// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP +// request body, all +// fields are passed via URL path and URL query parameters. 
+// +// ### Path template syntax +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single URL path segment. The syntax `**` matches +// zero or more URL path segments, which must be the last part of the URL path +// except the `Verb`. +// +// The syntax `Variable` matches part of the URL path as specified by its +// template. A variable template must not contain other variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +// contains any reserved character, such characters should be percent-encoded +// before the matching. +// +// If a variable contains exactly one path segment, such as `"{var}"` or +// `"{var=*}"`, when such a variable is expanded into a URL path on the client +// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The +// server side does the reverse decoding. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{var}`. +// +// If a variable contains multiple path segments, such as `"{var=foo/*}"` +// or `"{var=**}"`, when such a variable is expanded into a URL path on the +// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. +// The server side does the reverse decoding, except "%2F" and "%2f" are left +// unchanged. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{+var}`. +// +// ## Using gRPC API Service Configuration +// +// gRPC API Service Configuration (service config) is a configuration language +// for configuring a gRPC service to become a user-facing product. 
The +// service config is simply the YAML representation of the `google.api.Service` +// proto message. +// +// As an alternative to annotating your proto file, you can configure gRPC +// transcoding in your service config YAML files. You do this by specifying a +// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +// effect as the proto annotation. This can be particularly useful if you +// have a proto that is reused in multiple services. Note that any transcoding +// specified in the service config will override any matching transcoding +// configuration in the proto. +// +// Example: +// +// http: +// rules: +// # Selects a gRPC method and applies HttpRule to it. +// - selector: example.v1.Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// +// ## Special notes +// +// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +// proto to JSON conversion must follow the [proto3 +// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). +// +// While the single segment variable follows the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String +// Expansion, the multi segment variable **does not** follow RFC 6570 Section +// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion +// does not expand special characters like `?` and `#`, which would lead +// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding +// for multi segment variables. +// +// The path variables **must not** refer to any repeated or mapped field, +// because client libraries are not capable of handling such variable expansion. +// +// The path variables **must not** capture the leading "/" character. The reason +// is that the most common use case "{var}" does not capture the leading "/" +// character. For consistency, all path variables must share the same behavior. 
+// +// Repeated message fields must not be mapped to URL query parameters, because +// no client library can support such complicated mapping. +// +// If an API needs to use a JSON array for request or response body, it can map +// the request or response body to a repeated field. However, some gRPC +// Transcoding implementations may not support this feature. +message HttpRule { + // Selects a method to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax + // details. + string selector = 1; + + // Determines the URL pattern is matched by this rules. This pattern can be + // used with any of the {get|put|post|delete|patch} methods. A custom method + // can be defined using the 'custom' field. + oneof pattern { + // Maps to HTTP GET. Used for listing and getting information about + // resources. + string get = 2; + + // Maps to HTTP PUT. Used for replacing a resource. + string put = 3; + + // Maps to HTTP POST. Used for creating a resource or performing an action. + string post = 4; + + // Maps to HTTP DELETE. Used for deleting a resource. + string delete = 5; + + // Maps to HTTP PATCH. Used for updating a resource. + string patch = 6; + + // The custom pattern is used for specifying an HTTP method that is not + // included in the `pattern` field, such as HEAD, or "*" to leave the + // HTTP method unspecified for this rule. The wild-card rule is useful + // for services that provide content to Web (HTML) clients. + CustomHttpPattern custom = 8; + } + + // The name of the request field whose value is mapped to the HTTP request + // body, or `*` for mapping all request fields not captured by the path + // pattern to the HTTP body, or omitted for not having any HTTP request body. + // + // NOTE: the referred field must be present at the top-level of the request + // message type. + string body = 7; + + // Optional. The name of the response field whose value is mapped to the HTTP + // response body. 
When omitted, the entire response message will be used + // as the HTTP response body. + // + // NOTE: The referred field must be present at the top-level of the response + // message type. + string response_body = 12; + + // Additional HTTP bindings for the selector. Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). + repeated HttpRule additional_bindings = 11; +} + +// A custom pattern is used for defining custom HTTP verb. +message CustomHttpPattern { + // The name of this custom HTTP verb. + string kind = 1; + + // The path matched by this custom verb. + string path = 2; +} diff --git a/.temporal-api/google/protobuf/any.proto b/.temporal-api/google/protobuf/any.proto new file mode 100644 index 0000000..ad8a3b5 --- /dev/null +++ b/.temporal-api/google/protobuf/any.proto @@ -0,0 +1,162 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option go_package = "google.golang.org/protobuf/types/known/anypb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// // or ... +// if (any.isSameTypeAs(Foo.getDefaultInstance())) { +// foo = any.unpack(Foo.getDefaultInstance()); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) 
+// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } +// ... +// foo := &pb.Foo{} +// if err := any.UnmarshalTo(foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. This string must contain at least + // one "/" character. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. 
However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. As of May 2023, there are no widely used type server + // implementations and no plans to implement one. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} \ No newline at end of file diff --git a/.temporal-api/google/protobuf/descriptor.proto b/.temporal-api/google/protobuf/descriptor.proto new file mode 100644 index 0000000..4bc6ce0 --- /dev/null +++ b/.temporal-api/google/protobuf/descriptor.proto @@ -0,0 +1,1212 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). 
+ +syntax = "proto2"; + +package google.protobuf; + +option go_package = "google.golang.org/protobuf/types/descriptorpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// The full set of known editions. +enum Edition { + // A placeholder for an unknown edition value. + EDITION_UNKNOWN = 0; + + // Legacy syntax "editions". These pre-date editions, but behave much like + // distinct editions. These can't be used to specify the edition of proto + // files, but feature definitions must supply proto2/proto3 defaults for + // backwards compatibility. + EDITION_PROTO2 = 998; + EDITION_PROTO3 = 999; + + // Editions that have been released. The specific values are arbitrary and + // should not be depended on, but they will always be time-ordered for easy + // comparison. + EDITION_2023 = 1000; + + // Placeholder editions for testing feature resolution. These should not be + // used or relied on outside of tests. + EDITION_1_TEST_ONLY = 1; + EDITION_2_TEST_ONLY = 2; + EDITION_99997_TEST_ONLY = 99997; + EDITION_99998_TEST_ONLY = 99998; + EDITION_99999_TEST_ONLY = 99999; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. 
+ repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2", "proto3", and "editions". + // + // If `edition` is present, this value must be "editions". + optional string syntax = 12; + + // The edition of the proto file. + optional Edition edition = 14; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. + message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. 
+ } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + message Declaration { + // The extension number declared within the extension range. + optional int32 number = 1; + + // The fully-qualified name of the extension field. There must be a leading + // dot in front of the full name. + optional string full_name = 2; + + // The fully-qualified type name of the extension field. Unlike + // Metadata.type, Declaration.type must have a leading dot for messages + // and enums. + optional string type = 3; + + // If true, indicates that the number is reserved in the extension range, + // and any extension field with the number will fail to compile. Set this + // when a declared extension field is deleted. + optional bool reserved = 5; + + // If true, indicates that the extension must be defined as repeated. + // Otherwise the extension must be defined as optional. + optional bool repeated = 6; + + reserved 4; // removed is_repeated + } + + // For external users: DO NOT USE. We are in the process of open sourcing + // extension declaration and executing internal cleanups before it can be + // used externally. + repeated Declaration declaration = 2 [retention = RETENTION_SOURCE]; + + // Any features defined in the specific edition. + optional FeatureSet features = 50; + + // The verification state of the extension range. + enum VerificationState { + // All the extensions of the range must be declared. + DECLARATION = 0; + UNVERIFIED = 1; + } + + // The verification state of the range. + // TODO: flip the default to DECLARATION once all empty ranges + // are marked as UNVERIFIED. 
+ optional VerificationState verification = 3 [default = UNVERIFIED]; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + } + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + } + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). 
+ optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; + + // If true, this is a proto3 "optional". When a proto3 field is optional, it + // tracks presence regardless of field type. + // + // When proto3_optional is true, this field must be belong to a oneof to + // signal to old proto3 clients that presence is tracked for this field. This + // oneof is known as a "synthetic" oneof, and this field must be its sole + // member (each proto3 optional field gets its own synthetic oneof). Synthetic + // oneofs exist in the descriptor only, and do not generate any API. Synthetic + // oneofs must be ordered after all "real" oneofs. + // + // For message fields, proto3_optional doesn't create any semantic change, + // since non-repeated message fields always track presence. However it still + // indicates the semantic detail of whether the user wrote "optional" or not. + // This can be useful for round-tripping the .proto file. For consistency we + // give message fields a synthetic oneof also, even though it is not required + // to track presence. 
This is especially important because the parser can't + // tell if a field is a message or an enum, so it must always create a + // synthetic oneof. + // + // Proto2 optional fields do not set this flag, because they already indicate + // optional with `LABEL_OPTIONAL`. + optional bool proto3_optional = 17; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; + + // Range of reserved numeric values. Reserved values may not be used by + // entries in the same enum. Reserved ranges may not overlap. + // + // Note that this is distinct from DescriptorProto.ReservedRange in that it + // is inclusive such that it can appropriately represent the entire int32 + // domain. + message EnumReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Inclusive. + } + + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + repeated EnumReservedRange reserved_range = 4; + + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + repeated string reserved_name = 5; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. 
These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default = false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default = false]; +} + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. 
You can declare multiple options with only one extension +// number by putting them in a sub-message. See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + // Controls the name of the wrapper Java class generated for the .proto file. + // That class will always contain the .proto file's getDescriptor() method as + // well as any top-level extensions defined in the .proto file. + // If java_multiple_files is disabled, then all the other classes from the + // .proto file will be nested inside the single wrapper outer class. + optional string java_outer_classname = 8; + + // If enabled, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the wrapper class + // named by java_outer_classname. However, the wrapper class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + optional bool java_multiple_files = 10 [default = false]; + + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. 
+ // This option has no effect on when used with the lite runtime. + optional bool java_string_check_utf8 = 27 [default = false]; + + // Generated classes can be optimized for speed or code size. + enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default = SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + optional bool cc_generic_services = 16 [default = false]; + optional bool java_generic_services = 17 [default = false]; + optional bool py_generic_services = 18 [default = false]; + optional bool php_generic_services = 42 [default = false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. 
+ optional bool deprecated = 23 [default = false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default = true]; + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. + optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + optional string php_namespace = 41; + + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + optional string php_metadata_namespace = 44; + + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + optional string ruby_package = 45; + + // Any features defined in the specific edition. + optional FeatureSet features = 50; + + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
+ repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. + // See the documentation for the "Options" section above. + extensions 1000 to max; + + reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + optional bool message_set_wire_format = 1 [default = false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default = false]; + + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + optional bool deprecated = 3 [default = false]; + + reserved 4, 5, 6; + + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + // + // Whether the message is an automatically generated map entry type for the + // maps field. 
+ // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + optional bool map_entry = 7; + + reserved 8; // javalite_serializable + reserved 9; // javanano_as_lite + + // Enable the legacy handling of JSON field name conflicts. This lowercases + // and strips underscored from the fields before comparison in proto3 only. + // The new behavior takes `json_name` into account and applies to proto2 as + // well. + // + // This should only be used as a temporary measure against broken builds due + // to the change in behavior for JSON field name conflicts. + // + // TODO This is legacy behavior we plan to remove once downstream + // teams have had time to migrate. + optional bool deprecated_legacy_json_field_conflicts = 11 [deprecated = true]; + + // Any features defined in the specific edition. + optional FeatureSet features = 12; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message FieldOptions { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is only implemented to support use of + // [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of + // type "bytes" in the open source release -- sorry, we'll try to include + // other types in a future version! 
+ optional CType ctype = 1 [default = STRING]; + enum CType { + // Default mode. + STRING = 0; + + // The option [ctype=CORD] may be applied to a non-repeated field of type + // "bytes". It indicates that in C++, the data should be stored in a Cord + // instead of a string. For very large strings, this may reduce memory + // fragmentation. It may also allow better performance when parsing from a + // Cord, or when parsing with aliasing enabled, as the parsed Cord may then + // alias the original buffer. + CORD = 1; + + STRING_PIECE = 2; + } + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + optional bool packed = 2; + + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + optional JSType jstype = 6 [default = JS_NORMAL]; + enum JSType { + // Use the default type. + JS_NORMAL = 0; + + // Use JavaScript strings. + JS_STRING = 1; + + // Use JavaScript numbers. + JS_NUMBER = 2; + } + + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. 
It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + // + // As of May 2022, lazy verifies the contents of the byte stream during + // parsing. An invalid byte stream will cause the overall parsing to fail. + optional bool lazy = 5 [default = false]; + + // unverified_lazy does no correctness checks on the byte stream. 
This should + // only be used where lazy with verification is prohibitive for performance + // reasons. + optional bool unverified_lazy = 15 [default = false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default = false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default = false]; + + // Indicate that the field value should not be printed out when using debug + // formats, e.g. when the field contains sensitive credentials. + optional bool debug_redact = 16 [default = false]; + + // If set to RETENTION_SOURCE, the option will be omitted from the binary. + // Note: as of January 2023, support for this is in progress and does not yet + // have an effect (b/264593489). + enum OptionRetention { + RETENTION_UNKNOWN = 0; + RETENTION_RUNTIME = 1; + RETENTION_SOURCE = 2; + } + + optional OptionRetention retention = 17; + + // This indicates the types of entities that the field may apply to when used + // as an option. If it is unset, then the field may be freely used as an + // option on any kind of entity. Note: as of January 2023, support for this is + // in progress and does not yet have an effect (b/264593489). + enum OptionTargetType { + TARGET_TYPE_UNKNOWN = 0; + TARGET_TYPE_FILE = 1; + TARGET_TYPE_EXTENSION_RANGE = 2; + TARGET_TYPE_MESSAGE = 3; + TARGET_TYPE_FIELD = 4; + TARGET_TYPE_ONEOF = 5; + TARGET_TYPE_ENUM = 6; + TARGET_TYPE_ENUM_ENTRY = 7; + TARGET_TYPE_SERVICE = 8; + TARGET_TYPE_METHOD = 9; + } + + repeated OptionTargetType targets = 19; + + message EditionDefault { + optional Edition edition = 3; + optional string value = 2; // Textproto value. + } + repeated EditionDefault edition_defaults = 20; + + // Any features defined in the specific edition. 
+ optional FeatureSet features = 21; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + reserved 4; // removed jtype + reserved 18; // reserve target, target_obsolete_do_not_use +} + +message OneofOptions { + // Any features defined in the specific edition. + optional FeatureSet features = 1; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default = false]; + + reserved 5; // javanano_as_lite + + // Enable the legacy handling of JSON field name conflicts. This lowercases + // and strips underscored from the fields before comparison in proto3 only. + // The new behavior takes `json_name` into account and applies to proto2 as + // well. + // TODO Remove this legacy behavior once downstream teams have + // had time to migrate. + optional bool deprecated_legacy_json_field_conflicts = 6 [deprecated = true]; + + // Any features defined in the specific edition. + optional FeatureSet features = 7; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. 
+ extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default = false]; + + // Any features defined in the specific edition. + optional FeatureSet features = 2; + + // Indicate that fields annotated with this enum value should not be printed + // out when using debug formats, e.g. when the field contains sensitive + // credentials. + optional bool debug_redact = 3 [default = false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Any features defined in the specific edition. + optional FeatureSet features = 34; + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default = false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. 
We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + optional bool deprecated = 33 [default = false]; + + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. + enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = 34 + [default = IDEMPOTENCY_UNKNOWN]; + + // Any features defined in the specific edition. + optional FeatureSet features = 35; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +message UninterpretedOption { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). 
+ // E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents + // "foo.(bar.baz).moo". + message NamePart { + required string name_part = 1; + required bool is_extension = 2; + } + repeated NamePart name = 2; + + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + optional string identifier_value = 3; + optional uint64 positive_int_value = 4; + optional int64 negative_int_value = 5; + optional double double_value = 6; + optional bytes string_value = 7; + optional string aggregate_value = 8; +} + +// =================================================================== +// Features + +// TODO Enums in C++ gencode (and potentially other languages) are +// not well scoped. This means that each of the feature enums below can clash +// with each other. The short names we've chosen maximize call-site +// readability, but leave us very open to this scenario. A future feature will +// be designed and implemented to handle this, hopefully before we ever hit a +// conflict here. 
+message FeatureSet { + enum FieldPresence { + FIELD_PRESENCE_UNKNOWN = 0; + EXPLICIT = 1; + IMPLICIT = 2; + LEGACY_REQUIRED = 3; + } + optional FieldPresence field_presence = 1 [ + retention = RETENTION_RUNTIME, + targets = TARGET_TYPE_FIELD, + targets = TARGET_TYPE_FILE, + edition_defaults = { edition: EDITION_PROTO2, value: "EXPLICIT" }, + edition_defaults = { edition: EDITION_PROTO3, value: "IMPLICIT" }, + edition_defaults = { edition: EDITION_2023, value: "EXPLICIT" } + ]; + + enum EnumType { + ENUM_TYPE_UNKNOWN = 0; + OPEN = 1; + CLOSED = 2; + } + optional EnumType enum_type = 2 [ + retention = RETENTION_RUNTIME, + targets = TARGET_TYPE_ENUM, + targets = TARGET_TYPE_FILE, + edition_defaults = { edition: EDITION_PROTO2, value: "CLOSED" }, + edition_defaults = { edition: EDITION_PROTO3, value: "OPEN" } + ]; + + enum RepeatedFieldEncoding { + REPEATED_FIELD_ENCODING_UNKNOWN = 0; + PACKED = 1; + EXPANDED = 2; + } + optional RepeatedFieldEncoding repeated_field_encoding = 3 [ + retention = RETENTION_RUNTIME, + targets = TARGET_TYPE_FIELD, + targets = TARGET_TYPE_FILE, + edition_defaults = { edition: EDITION_PROTO2, value: "EXPANDED" }, + edition_defaults = { edition: EDITION_PROTO3, value: "PACKED" } + ]; + + enum Utf8Validation { + UTF8_VALIDATION_UNKNOWN = 0; + UNVERIFIED = 1; + VERIFY = 2; + } + optional Utf8Validation utf8_validation = 4 [ + retention = RETENTION_RUNTIME, + targets = TARGET_TYPE_FIELD, + targets = TARGET_TYPE_FILE, + edition_defaults = { edition: EDITION_PROTO2, value: "UNVERIFIED" }, + edition_defaults = { edition: EDITION_PROTO3, value: "VERIFY" } + ]; + + enum MessageEncoding { + MESSAGE_ENCODING_UNKNOWN = 0; + LENGTH_PREFIXED = 1; + DELIMITED = 2; + } + optional MessageEncoding message_encoding = 5 [ + retention = RETENTION_RUNTIME, + targets = TARGET_TYPE_FIELD, + targets = TARGET_TYPE_FILE, + edition_defaults = { edition: EDITION_PROTO2, value: "LENGTH_PREFIXED" } + ]; + + enum JsonFormat { + JSON_FORMAT_UNKNOWN = 0; + ALLOW = 1; + 
LEGACY_BEST_EFFORT = 2; + } + optional JsonFormat json_format = 6 [ + retention = RETENTION_RUNTIME, + targets = TARGET_TYPE_MESSAGE, + targets = TARGET_TYPE_ENUM, + targets = TARGET_TYPE_FILE, + edition_defaults = { edition: EDITION_PROTO2, value: "LEGACY_BEST_EFFORT" }, + edition_defaults = { edition: EDITION_PROTO3, value: "ALLOW" } + ]; + + reserved 999; + + extensions 1000; // for Protobuf C++ + extensions 1001; // for Protobuf Java + + extensions 9995 to 9999; // For internal testing +} + +// A compiled specification for the defaults of a set of features. These +// messages are generated from FeatureSet extensions and can be used to seed +// feature resolution. The resolution with this object becomes a simple search +// for the closest matching edition, followed by proto merges. +message FeatureSetDefaults { + // A map from every known edition with a unique set of defaults to its + // defaults. Not all editions may be contained here. For a given edition, + // the defaults at the closest matching edition ordered at or before it should + // be used. This field must be in strict ascending order by edition. + message FeatureSetEditionDefault { + optional Edition edition = 3; + optional FeatureSet features = 2; + } + repeated FeatureSetEditionDefault defaults = 1; + + // The minimum supported edition (inclusive) when this was constructed. + // Editions before this will not have defaults. + optional Edition minimum_edition = 4; + + // The maximum known edition (inclusive) when this was constructed. Editions + // after this will not have reliable defaults. + optional Edition maximum_edition = 5; +} + +// =================================================================== +// Optional source code info + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +message SourceCodeInfo { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. 
This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. 
+ // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + repeated Location location = 1; + message Location { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition occurs. + // For example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + repeated int32 path = 1 [packed = true]; + + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + repeated int32 span = 2 [packed = true]; + + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. 
+ // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to moo. + // // + // // Another line attached to moo. + // optional double moo = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to moo or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. 
This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed = true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified object. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + + // Represents the identified object's effect on the element in the original + // .proto file. + enum Semantic { + // There is no effect or the effect is indescribable. + NONE = 0; + // The element is set or otherwise mutated. + SET = 1; + // An alias to the element is returned. + ALIAS = 2; + } + optional Semantic semantic = 5; + } +} \ No newline at end of file diff --git a/.temporal-api/google/protobuf/duration.proto b/.temporal-api/google/protobuf/duration.proto new file mode 100644 index 0000000..a49438b --- /dev/null +++ b/.temporal-api/google/protobuf/duration.proto @@ -0,0 +1,115 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/protobuf/types/known/durationpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. 
+// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +message Duration { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. 
Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} \ No newline at end of file diff --git a/.temporal-api/google/protobuf/timestamp.proto b/.temporal-api/google/protobuf/timestamp.proto new file mode 100644 index 0000000..d0698db --- /dev/null +++ b/.temporal-api/google/protobuf/timestamp.proto @@ -0,0 +1,144 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/protobuf/types/known/timestamppb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. 
+// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// Example 5: Compute Timestamp from Java `Instant.now()`. +// +// Instant now = Instant.now(); +// +// Timestamp timestamp = +// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +// .setNanos(now.getNano()).build(); +// +// Example 6: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. 
A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime() +// ) to obtain a formatter capable of generating timestamps in this format. +// +message Timestamp { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} \ No newline at end of file diff --git a/.temporal-api/google/protobuf/wrappers.proto b/.temporal-api/google/protobuf/wrappers.proto new file mode 100644 index 0000000..6c4b5ac --- /dev/null +++ b/.temporal-api/google/protobuf/wrappers.proto @@ -0,0 +1,123 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. +// +// These wrappers have no meaningful use within repeated fields as they lack +// the ability to detect presence on individual elements. 
+// These wrappers have no meaningful use within a map or a oneof since +// individual entries of a map or fields of a oneof can already detect presence. + +syntax = "proto3"; + +package google.protobuf; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/protobuf/types/known/wrapperspb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. 
+message BytesValue { + // The bytes value. + bytes value = 1; +} \ No newline at end of file diff --git a/.temporal-api/temporal/api/batch/v1/message.proto b/.temporal-api/temporal/api/batch/v1/message.proto new file mode 100644 index 0000000..57ff1c8 --- /dev/null +++ b/.temporal-api/temporal/api/batch/v1/message.proto @@ -0,0 +1,101 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +syntax = "proto3"; + +package temporal.api.batch.v1; + +option go_package = "go.temporal.io/api/batch/v1;batch"; +option java_package = "io.temporal.api.batch.v1"; +option java_multiple_files = true; +option java_outer_classname = "MessageProto"; +option ruby_package = "Temporalio::Api::Batch::V1"; +option csharp_namespace = "Temporalio.Api.Batch.V1"; + +import "dependencies/gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; + + +import "temporal/api/common/v1/message.proto"; +import "temporal/api/enums/v1/batch_operation.proto"; +import "temporal/api/enums/v1/reset.proto"; + +message BatchOperationInfo { + // Batch job ID + string job_id = 1; + // Batch operation state + temporal.api.enums.v1.BatchOperationState state = 2; + // Batch operation start time + google.protobuf.Timestamp start_time = 3 [(gogoproto.stdtime) = true]; + // Batch operation close time + google.protobuf.Timestamp close_time = 4 [(gogoproto.stdtime) = true]; +} + +// BatchOperationTermination sends terminate requests to batch workflows. +// Keep the parameter in sync with temporal.api.workflowservice.v1.TerminateWorkflowExecutionRequest. +// Ignore first_execution_run_id because this is used for single workflow operation. +message BatchOperationTermination { + // Serialized value(s) to provide to the termination event + temporal.api.common.v1.Payloads details = 1; + // The identity of the worker/client + string identity = 2; +} + +// BatchOperationSignal sends signals to batch workflows. +// Keep the parameter in sync with temporal.api.workflowservice.v1.SignalWorkflowExecutionRequest. +message BatchOperationSignal { + // The workflow author-defined name of the signal to send to the workflow + string signal = 1; + // Serialized value(s) to provide with the signal + temporal.api.common.v1.Payloads input = 2; + // Headers that are passed with the signal to the processing workflow. + // These can include things like auth or tracing tokens. 
+ temporal.api.common.v1.Header header = 3; + // The identity of the worker/client + string identity = 4; +} + +// BatchOperationCancellation sends cancel requests to batch workflows. +// Keep the parameter in sync with temporal.api.workflowservice.v1.RequestCancelWorkflowExecutionRequest. +// Ignore first_execution_run_id because this is used for single workflow operation. +message BatchOperationCancellation { + // The identity of the worker/client + string identity = 1; +} + +// BatchOperationDeletion sends deletion requests to batch workflows. +// Keep the parameter in sync with temporal.api.workflowservice.v1.DeleteWorkflowExecutionRequest. +message BatchOperationDeletion { + // The identity of the worker/client + string identity = 1; +} + +// BatchOperationReset sends reset requests to batch workflows. +// Keep the parameter in sync with temporal.api.workflowservice.v1.ResetWorkflowExecutionRequest. +message BatchOperationReset { + // Reset type. + temporal.api.enums.v1.ResetType reset_type = 1; + // History event reapply options. + temporal.api.enums.v1.ResetReapplyType reset_reapply_type = 2; + // The identity of the worker/client. + string identity = 3; +} \ No newline at end of file diff --git a/.temporal-api/temporal/api/command/v1/message.proto b/.temporal-api/temporal/api/command/v1/message.proto new file mode 100644 index 0000000..8a2b2b6 --- /dev/null +++ b/.temporal-api/temporal/api/command/v1/message.proto @@ -0,0 +1,259 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +syntax = "proto3"; + +package temporal.api.command.v1; + +option go_package = "go.temporal.io/api/command/v1;command"; +option java_package = "io.temporal.api.command.v1"; +option java_multiple_files = true; +option java_outer_classname = "MessageProto"; +option ruby_package = "Temporalio::Api::Command::V1"; +option csharp_namespace = "Temporalio.Api.Command.V1"; + +import "google/protobuf/duration.proto"; + +import "dependencies/gogoproto/gogo.proto"; + +import "temporal/api/enums/v1/workflow.proto"; +import "temporal/api/enums/v1/command_type.proto"; +import "temporal/api/common/v1/message.proto"; +import "temporal/api/failure/v1/message.proto"; +import "temporal/api/taskqueue/v1/message.proto"; + +message ScheduleActivityTaskCommandAttributes { + string activity_id = 1; + temporal.api.common.v1.ActivityType activity_type = 2; + // This used to be a `namespace` field which allowed to schedule activity in another namespace. + reserved 3; + temporal.api.taskqueue.v1.TaskQueue task_queue = 4; + temporal.api.common.v1.Header header = 5; + temporal.api.common.v1.Payloads input = 6; + // Indicates how long the caller is willing to wait for activity completion. The "schedule" time + // is when the activity is initially scheduled, not when the most recent retry is scheduled. + // Limits how long retries will be attempted. Either this or `start_to_close_timeout` must be + // specified. When not specified, defaults to the workflow execution timeout. + // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: "to" is used to indicate interval. --) + google.protobuf.Duration schedule_to_close_timeout = 7 [(gogoproto.stdduration) = true]; + // Limits the time an activity task can stay in a task queue before a worker picks it up. The + // "schedule" time is when the most recent retry is scheduled. This timeout should usually not + // be set: it's useful in specific scenarios like worker-specific task queues. 
This timeout is + // always non retryable, as all a retry would achieve is to put it back into the same queue. + // Defaults to `schedule_to_close_timeout` or workflow execution timeout if that is not + // specified. More info: + // https://docs.temporal.io/docs/content/what-is-a-schedule-to-start-timeout/ + // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: "to" is used to indicate interval. --) + google.protobuf.Duration schedule_to_start_timeout = 8 [(gogoproto.stdduration) = true]; + // Maximum time an activity is allowed to execute after being picked up by a worker. This + // timeout is always retryable. Either this or `schedule_to_close_timeout` must be specified. + // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: "to" is used to indicate interval. --) + google.protobuf.Duration start_to_close_timeout = 9 [(gogoproto.stdduration) = true]; + // Maximum permitted time between successful worker heartbeats. + google.protobuf.Duration heartbeat_timeout = 10 [(gogoproto.stdduration) = true]; + // Activities are provided by a default retry policy which is controlled through the service's + // dynamic configuration. Retries will be attempted until `schedule_to_close_timeout` has + // elapsed. To disable retries set retry_policy.maximum_attempts to 1. + temporal.api.common.v1.RetryPolicy retry_policy = 11; + // Request to start the activity directly bypassing matching service and worker polling + // The slot for executing the activity should be reserved when setting this field to true. + bool request_eager_execution = 12; + // If this is set, the workflow executing this command wishes to start the activity using + // a version compatible with the version that this workflow most recently ran on, if such + // behavior is possible. 
+ bool use_compatible_version = 13; +} + +message RequestCancelActivityTaskCommandAttributes { + // The `ACTIVITY_TASK_SCHEDULED` event id for the activity being cancelled. + int64 scheduled_event_id = 1; +} + +message StartTimerCommandAttributes { + // An id for the timer, currently live timers must have different ids. Typically autogenerated + // by the SDK. + string timer_id = 1; + // How long until the timer fires, producing a `TIMER_FIRED` event. + // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: "to" is used to indicate interval. --) + google.protobuf.Duration start_to_fire_timeout = 2 [(gogoproto.stdduration) = true]; +} + +message CompleteWorkflowExecutionCommandAttributes { + temporal.api.common.v1.Payloads result = 1; +} + +message FailWorkflowExecutionCommandAttributes { + temporal.api.failure.v1.Failure failure = 1; +} + +message CancelTimerCommandAttributes { + // The same timer id from the start timer command + string timer_id = 1; +} + +message CancelWorkflowExecutionCommandAttributes { + temporal.api.common.v1.Payloads details = 1; +} + +message RequestCancelExternalWorkflowExecutionCommandAttributes { + string namespace = 1; + string workflow_id = 2; + string run_id = 3; + // Deprecated. + string control = 4; + // Set this to true if the workflow being cancelled is a child of the workflow originating this + // command. The request will be rejected if it is set to true and the target workflow is *not* + // a child of the requesting workflow. + bool child_workflow_only = 5; + // Reason for requesting the cancellation + string reason = 6; +} + +message SignalExternalWorkflowExecutionCommandAttributes { + string namespace = 1; + temporal.api.common.v1.WorkflowExecution execution = 2; + // The workflow author-defined name of the signal to send to the workflow. + string signal_name = 3; + // Serialized value(s) to provide with the signal. 
+ temporal.api.common.v1.Payloads input = 4; + // Deprecated + string control = 5; + // Set this to true if the workflow being cancelled is a child of the workflow originating this + // command. The request will be rejected if it is set to true and the target workflow is *not* + // a child of the requesting workflow. + bool child_workflow_only = 6; + // Headers that are passed by the workflow that is sending a signal to the external + // workflow that is receiving this signal. + temporal.api.common.v1.Header header = 7; +} + +message UpsertWorkflowSearchAttributesCommandAttributes { + temporal.api.common.v1.SearchAttributes search_attributes = 1; +} + +message ModifyWorkflowPropertiesCommandAttributes { + // If set, update the workflow memo with the provided values. The values will be merged with + // the existing memo. If the user wants to delete values, a default/empty Payload should be + // used as the value for the key being deleted. + temporal.api.common.v1.Memo upserted_memo = 1; +} + +message RecordMarkerCommandAttributes { + string marker_name = 1; + map details = 2; + temporal.api.common.v1.Header header = 3; + temporal.api.failure.v1.Failure failure = 4; +} + +message ContinueAsNewWorkflowExecutionCommandAttributes { + temporal.api.common.v1.WorkflowType workflow_type = 1; + temporal.api.taskqueue.v1.TaskQueue task_queue = 2; + temporal.api.common.v1.Payloads input = 3; + + // Timeout of a single workflow run. + google.protobuf.Duration workflow_run_timeout = 4 [(gogoproto.stdduration) = true]; + // Timeout of a single workflow task. + google.protobuf.Duration workflow_task_timeout = 5 [(gogoproto.stdduration) = true]; + // How long the workflow start will be delayed - not really a "backoff" in the traditional sense. 
+ google.protobuf.Duration backoff_start_interval = 6 [(gogoproto.stdduration) = true]; + temporal.api.common.v1.RetryPolicy retry_policy = 7; + // Should be removed + temporal.api.enums.v1.ContinueAsNewInitiator initiator = 8; + // Should be removed + temporal.api.failure.v1.Failure failure = 9; + // Should be removed + temporal.api.common.v1.Payloads last_completion_result = 10; + // Should be removed. Not necessarily unused but unclear and not exposed by SDKs. + string cron_schedule = 11; + temporal.api.common.v1.Header header = 12; + temporal.api.common.v1.Memo memo = 13; + temporal.api.common.v1.SearchAttributes search_attributes = 14; + // If this is set, the workflow executing this command wishes to continue as new using a version + // compatible with the version that this workflow most recently ran on. + bool use_compatible_version = 15; + + // `workflow_execution_timeout` is omitted as it shouldn't be overridden from within a workflow. +} + +message StartChildWorkflowExecutionCommandAttributes { + string namespace = 1; + string workflow_id = 2; + temporal.api.common.v1.WorkflowType workflow_type = 3; + temporal.api.taskqueue.v1.TaskQueue task_queue = 4; + temporal.api.common.v1.Payloads input = 5; + // Total workflow execution timeout including retries and continue as new. + google.protobuf.Duration workflow_execution_timeout = 6 [(gogoproto.stdduration) = true]; + // Timeout of a single workflow run. + google.protobuf.Duration workflow_run_timeout = 7 [(gogoproto.stdduration) = true]; + // Timeout of a single workflow task. + google.protobuf.Duration workflow_task_timeout = 8 [(gogoproto.stdduration) = true]; + // Default: PARENT_CLOSE_POLICY_TERMINATE. + temporal.api.enums.v1.ParentClosePolicy parent_close_policy = 9; + string control = 10; + // Default: WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE. 
+ temporal.api.enums.v1.WorkflowIdReusePolicy workflow_id_reuse_policy = 11; + temporal.api.common.v1.RetryPolicy retry_policy = 12; + // Establish a cron schedule for the child workflow. + string cron_schedule = 13; + temporal.api.common.v1.Header header = 14; + temporal.api.common.v1.Memo memo = 15; + temporal.api.common.v1.SearchAttributes search_attributes = 16; + // If this is set, the workflow executing this command wishes to start the child workflow using + // a version compatible with the version that this workflow most recently ran on, if such + // behavior is possible. + bool use_compatible_version = 17; +} + +message ProtocolMessageCommandAttributes { + // The message ID of the message to which this command is a pointer. + string message_id = 1; +} + +message Command { + temporal.api.enums.v1.CommandType command_type = 1; + oneof attributes { + ScheduleActivityTaskCommandAttributes schedule_activity_task_command_attributes = 2; + StartTimerCommandAttributes start_timer_command_attributes = 3; + CompleteWorkflowExecutionCommandAttributes complete_workflow_execution_command_attributes = 4; + FailWorkflowExecutionCommandAttributes fail_workflow_execution_command_attributes = 5; + RequestCancelActivityTaskCommandAttributes request_cancel_activity_task_command_attributes = 6; + CancelTimerCommandAttributes cancel_timer_command_attributes = 7; + CancelWorkflowExecutionCommandAttributes cancel_workflow_execution_command_attributes = 8; + RequestCancelExternalWorkflowExecutionCommandAttributes request_cancel_external_workflow_execution_command_attributes = 9; + RecordMarkerCommandAttributes record_marker_command_attributes = 10; + ContinueAsNewWorkflowExecutionCommandAttributes continue_as_new_workflow_execution_command_attributes = 11; + StartChildWorkflowExecutionCommandAttributes start_child_workflow_execution_command_attributes = 12; + SignalExternalWorkflowExecutionCommandAttributes signal_external_workflow_execution_command_attributes = 13; + 
UpsertWorkflowSearchAttributesCommandAttributes upsert_workflow_search_attributes_command_attributes = 14; + ProtocolMessageCommandAttributes protocol_message_command_attributes = 15; + // 16 is available for use - it was used as part of a prototype that never made it into a release + ModifyWorkflowPropertiesCommandAttributes modify_workflow_properties_command_attributes = 17; + } +} diff --git a/.temporal-api/temporal/api/common/v1/message.proto b/.temporal-api/temporal/api/common/v1/message.proto new file mode 100644 index 0000000..a66e4d6 --- /dev/null +++ b/.temporal-api/temporal/api/common/v1/message.proto @@ -0,0 +1,151 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +syntax = "proto3"; + +package temporal.api.common.v1; + +option go_package = "go.temporal.io/api/common/v1;common"; +option java_package = "io.temporal.api.common.v1"; +option java_multiple_files = true; +option java_outer_classname = "MessageProto"; +option ruby_package = "Temporalio::Api::Common::V1"; +option csharp_namespace = "Temporalio.Api.Common.V1"; + +import "google/protobuf/duration.proto"; + +import "dependencies/gogoproto/gogo.proto"; + +import "temporal/api/enums/v1/common.proto"; + +message DataBlob { + temporal.api.enums.v1.EncodingType encoding_type = 1; + bytes data = 2; +} + +// See `Payload` +message Payloads { + repeated Payload payloads = 1; +} + +// Represents some binary (byte array) data (ex: activity input parameters or workflow result) with +// metadata which describes this binary data (format, encoding, encryption, etc). Serialization +// of the data may be user-defined. +message Payload { + map metadata = 1; + bytes data = 2; +} + +// A user-defined set of *indexed* fields that are used/exposed when listing/searching workflows. +// The payload is not serialized in a user-defined way. +message SearchAttributes { + map indexed_fields = 1; +} + +// A user-defined set of *unindexed* fields that are exposed when listing/searching workflows +message Memo { + map fields = 1; +} + +// Contains metadata that can be attached to a variety of requests, like starting a workflow, and +// can be propagated between, for example, workflows and activities. +message Header { + map fields = 1; +} + +// Identifies a specific workflow within a namespace. Practically speaking, because run_id is a +// uuid, a workflow execution is globally unique. Note that many commands allow specifying an empty +// run id as a way of saying "target the latest run of the workflow". +message WorkflowExecution { + string workflow_id = 1; + string run_id = 2; +} + +// Represents the identifier used by a workflow author to define the workflow. 
Typically, the +// name of a function. This is sometimes referred to as the workflow's "name" +message WorkflowType { + string name = 1; +} + +// Represents the identifier used by a activity author to define the activity. Typically, the +// name of a function. This is sometimes referred to as the activity's "name" +message ActivityType { + string name = 1; +} + +// How retries ought to be handled, usable by both workflows and activities +message RetryPolicy { + // Interval of the first retry. If retryBackoffCoefficient is 1.0 then it is used for all retries. + google.protobuf.Duration initial_interval = 1 [(gogoproto.stdduration) = true]; + // Coefficient used to calculate the next retry interval. + // The next retry interval is previous interval multiplied by the coefficient. + // Must be 1 or larger. + double backoff_coefficient = 2; + // Maximum interval between retries. Exponential backoff leads to interval increase. + // This value is the cap of the increase. Default is 100x of the initial interval. + google.protobuf.Duration maximum_interval = 3 [(gogoproto.stdduration) = true]; + // Maximum number of attempts. When exceeded the retries stop even if not expired yet. + // 1 disables retries. 0 means unlimited (up to the timeouts) + int32 maximum_attempts = 4; + // Non-Retryable errors types. Will stop retrying if the error type matches this list. Note that + // this is not a substring match, the error *type* (not message) must match exactly. + repeated string non_retryable_error_types = 5; +} + +// Metadata relevant for metering purposes +message MeteringMetadata { + // Count of local activities which have begun an execution attempt during this workflow task, + // and whose first attempt occurred in some previous task. This is used for metering + // purposes, and does not affect workflow state. + // + // (-- api-linter: core::0141::forbidden-types=disabled + // aip.dev/not-precedent: Negative values make no sense to represent. 
--) + uint32 nonfirst_local_activity_execution_attempts = 13; +} + +// Identifies the version(s) of a worker that processed a task +message WorkerVersionStamp { + // An opaque whole-worker identifier. Replaces the deprecated `binary_checksum` field when this + // message is included in requests which previously used that. + string build_id = 1; + // Set if the worker used a dynamically loadable bundle to process + // the task. The bundle could be a WASM blob, JS bundle, etc. + string bundle_id = 2; + + // If set, the worker is opting in to worker versioning. Otherwise, this is used only as a + // marker for workflow reset points and the BuildIDs search attribute. + bool use_versioning = 3; +} + +// Identifies the version(s) that a worker is compatible with when polling or identifying itself, +// and whether or not this worker is opting into the build-id based versioning feature. This is +// used by matching to determine which workers ought to receive what tasks. +message WorkerVersionCapabilities { + // An opaque whole-worker identifier + string build_id = 1; + + // If set, the worker is opting in to worker versioning, and wishes to only receive appropriate + // tasks. + bool use_versioning = 2; + + // Later, may include info like "I can process WASM and/or JS bundles" +} diff --git a/.temporal-api/temporal/api/enums/v1/batch_operation.proto b/.temporal-api/temporal/api/enums/v1/batch_operation.proto new file mode 100644 index 0000000..c6e01ec --- /dev/null +++ b/.temporal-api/temporal/api/enums/v1/batch_operation.proto @@ -0,0 +1,48 @@ +// The MIT License +// +// Copyright (c) 2022 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +syntax = "proto3"; + +package temporal.api.enums.v1; + +option go_package = "go.temporal.io/api/enums/v1;enums"; +option java_package = "io.temporal.api.enums.v1"; +option java_multiple_files = true; +option java_outer_classname = "BatchOperationProto"; +option ruby_package = "Temporalio::Api::Enums::V1"; +option csharp_namespace = "Temporalio.Api.Enums.V1"; + +enum BatchOperationType { + BATCH_OPERATION_TYPE_UNSPECIFIED = 0; + BATCH_OPERATION_TYPE_TERMINATE = 1; + BATCH_OPERATION_TYPE_CANCEL = 2; + BATCH_OPERATION_TYPE_SIGNAL = 3; + BATCH_OPERATION_TYPE_DELETE = 4; + BATCH_OPERATION_TYPE_RESET = 5; +} + +enum BatchOperationState { + BATCH_OPERATION_STATE_UNSPECIFIED = 0; + BATCH_OPERATION_STATE_RUNNING = 1; + BATCH_OPERATION_STATE_COMPLETED = 2; + BATCH_OPERATION_STATE_FAILED = 3; +} diff --git a/.temporal-api/temporal/api/enums/v1/command_type.proto b/.temporal-api/temporal/api/enums/v1/command_type.proto new file mode 100644 index 0000000..c0a2caa --- /dev/null +++ b/.temporal-api/temporal/api/enums/v1/command_type.proto @@ -0,0 +1,52 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +syntax = "proto3"; + +package temporal.api.enums.v1; + +option go_package = "go.temporal.io/api/enums/v1;enums"; +option java_package = "io.temporal.api.enums.v1"; +option java_multiple_files = true; +option java_outer_classname = "CommandTypeProto"; +option ruby_package = "Temporalio::Api::Enums::V1"; +option csharp_namespace = "Temporalio.Api.Enums.V1"; + +// Whenever this list of command types is changed do change the function shouldBufferEvent in mutableStateBuilder.go to make sure to do the correct event ordering. +enum CommandType { + COMMAND_TYPE_UNSPECIFIED = 0; + COMMAND_TYPE_SCHEDULE_ACTIVITY_TASK = 1; + COMMAND_TYPE_REQUEST_CANCEL_ACTIVITY_TASK = 2; + COMMAND_TYPE_START_TIMER = 3; + COMMAND_TYPE_COMPLETE_WORKFLOW_EXECUTION = 4; + COMMAND_TYPE_FAIL_WORKFLOW_EXECUTION = 5; + COMMAND_TYPE_CANCEL_TIMER = 6; + COMMAND_TYPE_CANCEL_WORKFLOW_EXECUTION = 7; + COMMAND_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION = 8; + COMMAND_TYPE_RECORD_MARKER = 9; + COMMAND_TYPE_CONTINUE_AS_NEW_WORKFLOW_EXECUTION = 10; + COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION = 11; + COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION = 12; + COMMAND_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES = 13; + COMMAND_TYPE_PROTOCOL_MESSAGE = 14; + COMMAND_TYPE_MODIFY_WORKFLOW_PROPERTIES = 16; +} diff --git a/.temporal-api/temporal/api/enums/v1/common.proto b/.temporal-api/temporal/api/enums/v1/common.proto new file mode 100644 index 0000000..c942f79 --- /dev/null +++ b/.temporal-api/temporal/api/enums/v1/common.proto @@ -0,0 +1,56 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +syntax = "proto3"; + +package temporal.api.enums.v1; + +option go_package = "go.temporal.io/api/enums/v1;enums"; +option java_package = "io.temporal.api.enums.v1"; +option java_multiple_files = true; +option java_outer_classname = "CommonProto"; +option ruby_package = "Temporalio::Api::Enums::V1"; +option csharp_namespace = "Temporalio.Api.Enums.V1"; + +enum EncodingType { + ENCODING_TYPE_UNSPECIFIED = 0; + ENCODING_TYPE_PROTO3 = 1; + ENCODING_TYPE_JSON = 2; +} + +enum IndexedValueType { + INDEXED_VALUE_TYPE_UNSPECIFIED = 0; + INDEXED_VALUE_TYPE_TEXT = 1; + INDEXED_VALUE_TYPE_KEYWORD = 2; + INDEXED_VALUE_TYPE_INT = 3; + INDEXED_VALUE_TYPE_DOUBLE = 4; + INDEXED_VALUE_TYPE_BOOL = 5; + INDEXED_VALUE_TYPE_DATETIME = 6; + INDEXED_VALUE_TYPE_KEYWORD_LIST = 7; +} + +enum Severity { + SEVERITY_UNSPECIFIED = 0; + SEVERITY_HIGH = 1; + SEVERITY_MEDIUM = 2; + SEVERITY_LOW = 3; +} diff --git a/.temporal-api/temporal/api/enums/v1/event_type.proto b/.temporal-api/temporal/api/enums/v1/event_type.proto new file mode 100644 index 0000000..a65529c --- /dev/null +++ b/.temporal-api/temporal/api/enums/v1/event_type.proto @@ -0,0 +1,170 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +syntax = "proto3"; + +package temporal.api.enums.v1; + +option go_package = "go.temporal.io/api/enums/v1;enums"; +option java_package = "io.temporal.api.enums.v1"; +option java_multiple_files = true; +option java_outer_classname = "EventTypeProto"; +option ruby_package = "Temporalio::Api::Enums::V1"; +option csharp_namespace = "Temporalio.Api.Enums.V1"; + +// Whenever this list of events is changed do change the function shouldBufferEvent in mutableStateBuilder.go to make sure to do the correct event ordering +enum EventType { + // Place holder and should never appear in a Workflow execution history + EVENT_TYPE_UNSPECIFIED = 0; + // Workflow execution has been triggered/started + // It contains Workflow execution inputs, as well as Workflow timeout configurations + EVENT_TYPE_WORKFLOW_EXECUTION_STARTED = 1; + // Workflow execution has successfully completed and contains Workflow execution results + EVENT_TYPE_WORKFLOW_EXECUTION_COMPLETED = 2; + // Workflow execution has unsuccessfully completed and contains the Workflow execution error + EVENT_TYPE_WORKFLOW_EXECUTION_FAILED = 3; + // Workflow execution has timed out by the Temporal Server + // Usually due to the Workflow having not been completed within timeout settings + EVENT_TYPE_WORKFLOW_EXECUTION_TIMED_OUT = 4; + // Workflow Task has been scheduled and the SDK client should now be able to process any new history events + EVENT_TYPE_WORKFLOW_TASK_SCHEDULED = 5; + // Workflow Task has started and the SDK client has picked up 
the Workflow Task and is processing new history events + EVENT_TYPE_WORKFLOW_TASK_STARTED = 6; + // Workflow Task has completed + // The SDK client picked up the Workflow Task and processed new history events + // SDK client may or may not ask the Temporal Server to do additional work, such as: + // EVENT_TYPE_ACTIVITY_TASK_SCHEDULED + // EVENT_TYPE_TIMER_STARTED + // EVENT_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES + // EVENT_TYPE_MARKER_RECORDED + // EVENT_TYPE_START_CHILD_WORKFLOW_EXECUTION_INITIATED + // EVENT_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_INITIATED + // EVENT_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_INITIATED + // EVENT_TYPE_WORKFLOW_EXECUTION_COMPLETED + // EVENT_TYPE_WORKFLOW_EXECUTION_FAILED + // EVENT_TYPE_WORKFLOW_EXECUTION_CANCELED + // EVENT_TYPE_WORKFLOW_EXECUTION_CONTINUED_AS_NEW + EVENT_TYPE_WORKFLOW_TASK_COMPLETED = 7; + // Workflow Task encountered a timeout + // Either an SDK client with a local cache was not available at the time, or it took too long for the SDK client to process the task + EVENT_TYPE_WORKFLOW_TASK_TIMED_OUT = 8; + // Workflow Task encountered a failure + // Usually this means that the Workflow was non-deterministic + // However, the Workflow reset functionality also uses this event + EVENT_TYPE_WORKFLOW_TASK_FAILED = 9; + // Activity Task was scheduled + // The SDK client should pick up this activity task and execute + // This event type contains activity inputs, as well as activity timeout configurations + EVENT_TYPE_ACTIVITY_TASK_SCHEDULED = 10; + // Activity Task has started executing + // The SDK client has picked up the Activity Task and is processing the Activity invocation + EVENT_TYPE_ACTIVITY_TASK_STARTED = 11; + // Activity Task has finished successfully + // The SDK client has picked up and successfully completed the Activity Task + // This event type contains Activity execution results + EVENT_TYPE_ACTIVITY_TASK_COMPLETED = 12; + // Activity Task has finished unsuccessfully + // The SDK picked up the 
Activity Task but unsuccessfully completed it + // This event type contains Activity execution errors + EVENT_TYPE_ACTIVITY_TASK_FAILED = 13; + // Activity has timed out according to the Temporal Server + // Activity did not complete within the timeout settings + EVENT_TYPE_ACTIVITY_TASK_TIMED_OUT = 14; + // A request to cancel the Activity has occurred + // The SDK client will be able to confirm cancellation of an Activity during an Activity heartbeat + EVENT_TYPE_ACTIVITY_TASK_CANCEL_REQUESTED = 15; + // Activity has been cancelled + EVENT_TYPE_ACTIVITY_TASK_CANCELED = 16; + // A timer has started + EVENT_TYPE_TIMER_STARTED = 17; + // A timer has fired + EVENT_TYPE_TIMER_FIRED = 18; + // A time has been cancelled + EVENT_TYPE_TIMER_CANCELED = 19; + // A request has been made to cancel the Workflow execution + EVENT_TYPE_WORKFLOW_EXECUTION_CANCEL_REQUESTED = 20; + // SDK client has confirmed the cancellation request and the Workflow execution has been cancelled + EVENT_TYPE_WORKFLOW_EXECUTION_CANCELED = 21; + // Workflow has requested that the Temporal Server try to cancel another Workflow + EVENT_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_INITIATED = 22; + // Temporal Server could not cancel the targeted Workflow + // This is usually because the target Workflow could not be found + EVENT_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_FAILED = 23; + // Temporal Server has successfully requested the cancellation of the target Workflow + EVENT_TYPE_EXTERNAL_WORKFLOW_EXECUTION_CANCEL_REQUESTED = 24; + // A marker has been recorded. + // This event type is transparent to the Temporal Server + // The Server will only store it and will not try to understand it. 
+ EVENT_TYPE_MARKER_RECORDED = 25; + // Workflow has received a Signal event + // The event type contains the Signal name, as well as a Signal payload + EVENT_TYPE_WORKFLOW_EXECUTION_SIGNALED = 26; + // Workflow execution has been forcefully terminated + // This is usually because the terminate Workflow API was called + EVENT_TYPE_WORKFLOW_EXECUTION_TERMINATED = 27; + // Workflow has successfully completed and a new Workflow has been started within the same transaction + // Contains last Workflow execution results as well as new Workflow execution inputs + EVENT_TYPE_WORKFLOW_EXECUTION_CONTINUED_AS_NEW = 28; + // Temporal Server will try to start a child Workflow + EVENT_TYPE_START_CHILD_WORKFLOW_EXECUTION_INITIATED = 29; + // Child Workflow execution cannot be started/triggered + // Usually due to a child Workflow ID collision + EVENT_TYPE_START_CHILD_WORKFLOW_EXECUTION_FAILED = 30; + // Child Workflow execution has successfully started/triggered + EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_STARTED = 31; + // Child Workflow execution has successfully completed + EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_COMPLETED = 32; + // Child Workflow execution has unsuccessfully completed + EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_FAILED = 33; + // Child Workflow execution has been cancelled + EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_CANCELED = 34; + // Child Workflow execution has timed out by the Temporal Server + EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_TIMED_OUT = 35; + // Child Workflow execution has been terminated + EVENT_TYPE_CHILD_WORKFLOW_EXECUTION_TERMINATED = 36; + // Temporal Server will try to Signal the targeted Workflow + // Contains the Signal name, as well as a Signal payload + EVENT_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_INITIATED = 37; + // Temporal Server cannot Signal the targeted Workflow + // Usually because the Workflow could not be found + EVENT_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED = 38; + // Temporal Server has successfully Signaled the targeted Workflow + 
EVENT_TYPE_EXTERNAL_WORKFLOW_EXECUTION_SIGNALED = 39; + // Workflow search attributes should be updated and synchronized with the visibility store + EVENT_TYPE_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES = 40; + // An update was accepted (i.e. validated) + EVENT_TYPE_WORKFLOW_EXECUTION_UPDATE_ACCEPTED = 41; + // An update was rejected (i.e. failed validation) + EVENT_TYPE_WORKFLOW_EXECUTION_UPDATE_REJECTED = 42; + // An update completed + EVENT_TYPE_WORKFLOW_EXECUTION_UPDATE_COMPLETED = 43; + // Some property or properties of the workflow as a whole have changed by non-workflow code. + // The distinction of external vs. command-based modification is important so the SDK can + // maintain determinism when using the command-based approach. + EVENT_TYPE_WORKFLOW_PROPERTIES_MODIFIED_EXTERNALLY = 44; + // Some property or properties of an already-scheduled activity have changed by non-workflow code. + // The distinction of external vs. command-based modification is important so the SDK can + // maintain determinism when using the command-based approach. + EVENT_TYPE_ACTIVITY_PROPERTIES_MODIFIED_EXTERNALLY = 45; + // Workflow properties modified by user workflow code + EVENT_TYPE_WORKFLOW_PROPERTIES_MODIFIED = 46; +} diff --git a/.temporal-api/temporal/api/enums/v1/failed_cause.proto b/.temporal-api/temporal/api/enums/v1/failed_cause.proto new file mode 100644 index 0000000..a13684c --- /dev/null +++ b/.temporal-api/temporal/api/enums/v1/failed_cause.proto @@ -0,0 +1,129 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +syntax = "proto3"; + +package temporal.api.enums.v1; + +option go_package = "go.temporal.io/api/enums/v1;enums"; +option java_package = "io.temporal.api.enums.v1"; +option java_multiple_files = true; +option java_outer_classname = "FailedCauseProto"; +option ruby_package = "Temporalio::Api::Enums::V1"; +option csharp_namespace = "Temporalio.Api.Enums.V1"; + +// Workflow tasks can fail for various reasons. Note that some of these reasons can only originate +// from the server, and some of them can only originate from the SDK/worker. +enum WorkflowTaskFailedCause { + WORKFLOW_TASK_FAILED_CAUSE_UNSPECIFIED = 0; + // Between starting and completing the workflow task (with a workflow completion command), some + // new command (like a signal) was processed into workflow history. The outstanding task will be + // failed with this reason, and a worker must pick up a new task. 
+ WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND = 1; + WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_ACTIVITY_ATTRIBUTES = 2; + WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_ACTIVITY_ATTRIBUTES = 3; + WORKFLOW_TASK_FAILED_CAUSE_BAD_START_TIMER_ATTRIBUTES = 4; + WORKFLOW_TASK_FAILED_CAUSE_BAD_CANCEL_TIMER_ATTRIBUTES = 5; + WORKFLOW_TASK_FAILED_CAUSE_BAD_RECORD_MARKER_ATTRIBUTES = 6; + WORKFLOW_TASK_FAILED_CAUSE_BAD_COMPLETE_WORKFLOW_EXECUTION_ATTRIBUTES = 7; + WORKFLOW_TASK_FAILED_CAUSE_BAD_FAIL_WORKFLOW_EXECUTION_ATTRIBUTES = 8; + WORKFLOW_TASK_FAILED_CAUSE_BAD_CANCEL_WORKFLOW_EXECUTION_ATTRIBUTES = 9; + WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_ATTRIBUTES = 10; + WORKFLOW_TASK_FAILED_CAUSE_BAD_CONTINUE_AS_NEW_ATTRIBUTES = 11; + WORKFLOW_TASK_FAILED_CAUSE_START_TIMER_DUPLICATE_ID = 12; + // The worker wishes to fail the task and have the next one be generated on a normal, not sticky + // queue. Generally workers should prefer to use the explicit `ResetStickyTaskQueue` RPC call. + WORKFLOW_TASK_FAILED_CAUSE_RESET_STICKY_TASK_QUEUE = 13; + WORKFLOW_TASK_FAILED_CAUSE_WORKFLOW_WORKER_UNHANDLED_FAILURE = 14; + WORKFLOW_TASK_FAILED_CAUSE_BAD_SIGNAL_WORKFLOW_EXECUTION_ATTRIBUTES = 15; + WORKFLOW_TASK_FAILED_CAUSE_BAD_START_CHILD_EXECUTION_ATTRIBUTES = 16; + WORKFLOW_TASK_FAILED_CAUSE_FORCE_CLOSE_COMMAND = 17; + WORKFLOW_TASK_FAILED_CAUSE_FAILOVER_CLOSE_COMMAND = 18; + WORKFLOW_TASK_FAILED_CAUSE_BAD_SIGNAL_INPUT_SIZE = 19; + WORKFLOW_TASK_FAILED_CAUSE_RESET_WORKFLOW = 20; + WORKFLOW_TASK_FAILED_CAUSE_BAD_BINARY = 21; + WORKFLOW_TASK_FAILED_CAUSE_SCHEDULE_ACTIVITY_DUPLICATE_ID = 22; + WORKFLOW_TASK_FAILED_CAUSE_BAD_SEARCH_ATTRIBUTES = 23; + // The worker encountered a mismatch while replaying history between what was expected, and + // what the workflow code actually did. 
+ WORKFLOW_TASK_FAILED_CAUSE_NON_DETERMINISTIC_ERROR = 24; + WORKFLOW_TASK_FAILED_CAUSE_BAD_MODIFY_WORKFLOW_PROPERTIES_ATTRIBUTES = 25; + + // We send the below error codes to users when their requests would violate a size constraint + // of their workflow. We do this to ensure that the state of their workflow does not become too + // large because that can cause severe performance degradation. You can modify the thresholds for + // each of these errors within your dynamic config. + // + // Spawning a new child workflow would cause this workflow to exceed its limit of pending child + // workflows. + WORKFLOW_TASK_FAILED_CAUSE_PENDING_CHILD_WORKFLOWS_LIMIT_EXCEEDED = 26; + // Starting a new activity would cause this workflow to exceed its limit of pending activities + // that we track. + WORKFLOW_TASK_FAILED_CAUSE_PENDING_ACTIVITIES_LIMIT_EXCEEDED = 27; + // A workflow has a buffer of signals that have not yet reached their destination. We return this + // error when sending a new signal would exceed the capacity of this buffer. + WORKFLOW_TASK_FAILED_CAUSE_PENDING_SIGNALS_LIMIT_EXCEEDED = 28; + // Similarly, we have a buffer of pending requests to cancel other workflows. We return this error + // when our capacity for pending cancel requests is already reached. + WORKFLOW_TASK_FAILED_CAUSE_PENDING_REQUEST_CANCEL_LIMIT_EXCEEDED = 29; + // Workflow execution update message (update.Acceptance, update.Rejection, or update.Response) + // has wrong format, or missing required fields. + WORKFLOW_TASK_FAILED_CAUSE_BAD_UPDATE_WORKFLOW_EXECUTION_MESSAGE = 30; + // Similar to WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_COMMAND, but for updates. 
+ WORKFLOW_TASK_FAILED_CAUSE_UNHANDLED_UPDATE = 31; +} + +enum StartChildWorkflowExecutionFailedCause { + START_CHILD_WORKFLOW_EXECUTION_FAILED_CAUSE_UNSPECIFIED = 0; + START_CHILD_WORKFLOW_EXECUTION_FAILED_CAUSE_WORKFLOW_ALREADY_EXISTS = 1; + START_CHILD_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND = 2; +} + +enum CancelExternalWorkflowExecutionFailedCause { + CANCEL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_UNSPECIFIED = 0; + CANCEL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_EXTERNAL_WORKFLOW_EXECUTION_NOT_FOUND = 1; + CANCEL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND = 2; +} + +enum SignalExternalWorkflowExecutionFailedCause { + SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_UNSPECIFIED = 0; + SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_EXTERNAL_WORKFLOW_EXECUTION_NOT_FOUND = 1; + SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_NAMESPACE_NOT_FOUND = 2; + // Signal count limit is per workflow and controlled by server dynamic config "history.maximumSignalsPerExecution" + SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_FAILED_CAUSE_SIGNAL_COUNT_LIMIT_EXCEEDED = 3; +} + +enum ResourceExhaustedCause { + RESOURCE_EXHAUSTED_CAUSE_UNSPECIFIED = 0; + // Caller exceeds request per second limit. + RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT = 1; + // Caller exceeds max concurrent request limit. + RESOURCE_EXHAUSTED_CAUSE_CONCURRENT_LIMIT = 2; + // System overloaded. + RESOURCE_EXHAUSTED_CAUSE_SYSTEM_OVERLOADED = 3; + // Namespace exceeds persistence rate limit. + RESOURCE_EXHAUSTED_CAUSE_PERSISTENCE_LIMIT = 4; + // Workflow is busy + RESOURCE_EXHAUSTED_CAUSE_BUSY_WORKFLOW = 5; + // Caller exceeds action per second limit. 
+ RESOURCE_EXHAUSTED_CAUSE_APS_LIMIT = 6; +} diff --git a/.temporal-api/temporal/api/enums/v1/namespace.proto b/.temporal-api/temporal/api/enums/v1/namespace.proto new file mode 100644 index 0000000..561967a --- /dev/null +++ b/.temporal-api/temporal/api/enums/v1/namespace.proto @@ -0,0 +1,51 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +syntax = "proto3"; + +package temporal.api.enums.v1; + +option go_package = "go.temporal.io/api/enums/v1;enums"; +option java_package = "io.temporal.api.enums.v1"; +option java_multiple_files = true; +option java_outer_classname = "NamespaceProto"; +option ruby_package = "Temporalio::Api::Enums::V1"; +option csharp_namespace = "Temporalio.Api.Enums.V1"; + +enum NamespaceState { + NAMESPACE_STATE_UNSPECIFIED = 0; + NAMESPACE_STATE_REGISTERED = 1; + NAMESPACE_STATE_DEPRECATED = 2; + NAMESPACE_STATE_DELETED = 3; +} + +enum ArchivalState { + ARCHIVAL_STATE_UNSPECIFIED = 0; + ARCHIVAL_STATE_DISABLED = 1; + ARCHIVAL_STATE_ENABLED = 2; +} + +enum ReplicationState { + REPLICATION_STATE_UNSPECIFIED = 0; + REPLICATION_STATE_NORMAL = 1; + REPLICATION_STATE_HANDOVER = 2; +} diff --git a/.temporal-api/temporal/api/enums/v1/query.proto b/.temporal-api/temporal/api/enums/v1/query.proto new file mode 100644 index 0000000..0638594 --- /dev/null +++ b/.temporal-api/temporal/api/enums/v1/query.proto @@ -0,0 +1,50 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +syntax = "proto3"; + +package temporal.api.enums.v1; + +option go_package = "go.temporal.io/api/enums/v1;enums"; +option java_package = "io.temporal.api.enums.v1"; +option java_multiple_files = true; +option java_outer_classname = "QueryProto"; +option ruby_package = "Temporalio::Api::Enums::V1"; +option csharp_namespace = "Temporalio.Api.Enums.V1"; + +enum QueryResultType { + QUERY_RESULT_TYPE_UNSPECIFIED = 0; + QUERY_RESULT_TYPE_ANSWERED = 1; + QUERY_RESULT_TYPE_FAILED = 2; +} + +enum QueryRejectCondition { + QUERY_REJECT_CONDITION_UNSPECIFIED = 0; + // None indicates that query should not be rejected. + QUERY_REJECT_CONDITION_NONE = 1; + // NotOpen indicates that query should be rejected if workflow is not open. + QUERY_REJECT_CONDITION_NOT_OPEN = 2; + // NotCompletedCleanly indicates that query should be rejected if workflow did not complete cleanly. + QUERY_REJECT_CONDITION_NOT_COMPLETED_CLEANLY = 3; +} + + diff --git a/.temporal-api/temporal/api/enums/v1/reset.proto b/.temporal-api/temporal/api/enums/v1/reset.proto new file mode 100644 index 0000000..3cb9b30 --- /dev/null +++ b/.temporal-api/temporal/api/enums/v1/reset.proto @@ -0,0 +1,50 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +syntax = "proto3"; + +package temporal.api.enums.v1; + +option go_package = "go.temporal.io/api/enums/v1;enums"; +option java_package = "io.temporal.api.enums.v1"; +option java_multiple_files = true; +option java_outer_classname = "ResetProto"; +option ruby_package = "Temporalio::Api::Enums::V1"; +option csharp_namespace = "Temporalio.Api.Enums.V1"; + +// Reset reapply (replay) options +// * RESET_REAPPLY_TYPE_SIGNAL (default) - Signals are reapplied when workflow is reset +// * RESET_REAPPLY_TYPE_NONE - nothing is reapplied +enum ResetReapplyType { + RESET_REAPPLY_TYPE_UNSPECIFIED = 0; + RESET_REAPPLY_TYPE_SIGNAL = 1; + RESET_REAPPLY_TYPE_NONE = 2; +} + +// Reset type options +enum ResetType { + RESET_TYPE_UNSPECIFIED = 0; + // Resets to event of the first workflow task completed, or if it does not exist, the event after task scheduled. 
+ RESET_TYPE_FIRST_WORKFLOW_TASK = 1; + // Resets to event of the last workflow task completed, or if it does not exist, the event after task scheduled. + RESET_TYPE_LAST_WORKFLOW_TASK = 2; +} \ No newline at end of file diff --git a/.temporal-api/temporal/api/enums/v1/schedule.proto b/.temporal-api/temporal/api/enums/v1/schedule.proto new file mode 100644 index 0000000..4319487 --- /dev/null +++ b/.temporal-api/temporal/api/enums/v1/schedule.proto @@ -0,0 +1,60 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
 + +syntax = "proto3"; + +package temporal.api.enums.v1; + +option go_package = "go.temporal.io/api/enums/v1;enums"; +option java_package = "io.temporal.api.enums.v1"; +option java_multiple_files = true; +option java_outer_classname = "ScheduleProto"; +option ruby_package = "Temporalio::Api::Enums::V1"; +option csharp_namespace = "Temporalio.Api.Enums.V1"; + + +// ScheduleOverlapPolicy controls what happens when a workflow would be started +// by a schedule, and is already running. +enum ScheduleOverlapPolicy { + SCHEDULE_OVERLAP_POLICY_UNSPECIFIED = 0; + // SCHEDULE_OVERLAP_POLICY_SKIP (default) means don't start anything. When the + // workflow completes, the next scheduled event after that time will be considered. + SCHEDULE_OVERLAP_POLICY_SKIP = 1; + // SCHEDULE_OVERLAP_POLICY_BUFFER_ONE means start the workflow again as soon as the + // current one completes, but only buffer one start in this way. If another start is + // supposed to happen when the workflow is running, and one is already buffered, then + // only the first one will be started after the running workflow finishes. + SCHEDULE_OVERLAP_POLICY_BUFFER_ONE = 2; + // SCHEDULE_OVERLAP_POLICY_BUFFER_ALL means buffer up any number of starts to all + // happen sequentially, immediately after the running workflow completes. + SCHEDULE_OVERLAP_POLICY_BUFFER_ALL = 3; + // SCHEDULE_OVERLAP_POLICY_CANCEL_OTHER means that if there is another workflow + // running, cancel it, and start the new one after the old one completes cancellation. + SCHEDULE_OVERLAP_POLICY_CANCEL_OTHER = 4; + // SCHEDULE_OVERLAP_POLICY_TERMINATE_OTHER means that if there is another workflow + // running, terminate it and start the new one immediately. + SCHEDULE_OVERLAP_POLICY_TERMINATE_OTHER = 5; + // SCHEDULE_OVERLAP_POLICY_ALLOW_ALL means start any number of concurrent workflows. + // Note that with this policy, last completion result and last failure will not be + // available since workflows are not sequential. 
+ SCHEDULE_OVERLAP_POLICY_ALLOW_ALL = 6; +} diff --git a/.temporal-api/temporal/api/enums/v1/task_queue.proto b/.temporal-api/temporal/api/enums/v1/task_queue.proto new file mode 100644 index 0000000..3ae4349 --- /dev/null +++ b/.temporal-api/temporal/api/enums/v1/task_queue.proto @@ -0,0 +1,78 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
 + +syntax = "proto3"; + +package temporal.api.enums.v1; + +option go_package = "go.temporal.io/api/enums/v1;enums"; +option java_package = "io.temporal.api.enums.v1"; +option java_multiple_files = true; +option java_outer_classname = "TaskQueueProto"; +option ruby_package = "Temporalio::Api::Enums::V1"; +option csharp_namespace = "Temporalio.Api.Enums.V1"; + +enum TaskQueueKind { + TASK_QUEUE_KIND_UNSPECIFIED = 0; + // Tasks from a normal workflow task queue always include complete workflow history + // + // The task queue specified by the user is always a normal task queue. There can be as many + // workers as desired for a single normal task queue. All those workers may pick up tasks from + // that queue. + TASK_QUEUE_KIND_NORMAL = 1; + // A sticky queue only includes new history since the last workflow task, and they are + // per-worker. + // + // Sticky queues are created dynamically by each worker during their start up. They only exist + // for the lifetime of the worker process. Tasks in a sticky task queue are only available to + // the worker that created the sticky queue. + // + // Sticky queues are only for workflow tasks. There are no sticky task queues for activities. + TASK_QUEUE_KIND_STICKY = 2; +} + +enum TaskQueueType { + TASK_QUEUE_TYPE_UNSPECIFIED = 0; + // Workflow type of task queue. + TASK_QUEUE_TYPE_WORKFLOW = 1; + // Activity type of task queue. + TASK_QUEUE_TYPE_ACTIVITY = 2; +} + +// Specifies which category of tasks may reach a worker on a versioned task queue. +// Used both in a reachability query and its response. +enum TaskReachability { + TASK_REACHABILITY_UNSPECIFIED = 0; + // There's a possibility for a worker to receive new workflow tasks. Workers should *not* be retired. + TASK_REACHABILITY_NEW_WORKFLOWS = 1; + // There's a possibility for a worker to receive existing workflow and activity tasks from existing workflows. Workers + // should *not* be retired. + // This enum value does not distinguish between open and closed workflows. 
 + TASK_REACHABILITY_EXISTING_WORKFLOWS = 2; + // There's a possibility for a worker to receive existing workflow and activity tasks from open workflows. Workers + // should *not* be retired. + TASK_REACHABILITY_OPEN_WORKFLOWS = 3; + // There's a possibility for a worker to receive existing workflow tasks from closed workflows. Workers may be + // retired depending on application requirements. For example, if there's no need to query closed workflows. + TASK_REACHABILITY_CLOSED_WORKFLOWS = 4; +} + diff --git a/.temporal-api/temporal/api/enums/v1/update.proto b/.temporal-api/temporal/api/enums/v1/update.proto new file mode 100644 index 0000000..6635288 --- /dev/null +++ b/.temporal-api/temporal/api/enums/v1/update.proto @@ -0,0 +1,56 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
 + +syntax = "proto3"; + +package temporal.api.enums.v1; + +option go_package = "go.temporal.io/api/enums/v1;enums"; +option java_package = "io.temporal.api.enums.v1"; +option java_multiple_files = true; +option java_outer_classname = "UpdateProto"; +option ruby_package = "Temporalio::Api::Enums::V1"; +option csharp_namespace = "Temporalio.Api.Enums.V1"; + +// UpdateWorkflowExecutionLifecycleStage is specified by clients invoking +// workflow execution updates and used to indicate to the server how long the +// client wishes to wait for a return value from the RPC. If any value other +// than UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_COMPLETED is sent by the +// client then the RPC will complete before the update is finished and will +// return a handle to the running update so that it can later be polled for +// completion. +enum UpdateWorkflowExecutionLifecycleStage { + // An unspecified value for this enum. + UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_UNSPECIFIED = 0; + // The gRPC call will not return until the update request has been admitted + // by the server - it may be the case that due to considerations like load + // or resource limits that an update is made to wait before the server will + // indicate that it has been received and will be processed. This value + // does not wait for any sort of acknowledgement from a worker. + UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ADMITTED = 1; + // The gRPC call will not return until the update has passed validation on + // a worker. + UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ACCEPTED = 2; + // The gRPC call will not return until the update has executed to completion + // on a worker and has either been rejected or returned a value or an error. 
+ UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_COMPLETED = 3; +} diff --git a/.temporal-api/temporal/api/enums/v1/workflow.proto b/.temporal-api/temporal/api/enums/v1/workflow.proto new file mode 100644 index 0000000..fc0673b --- /dev/null +++ b/.temporal-api/temporal/api/enums/v1/workflow.proto @@ -0,0 +1,122 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +syntax = "proto3"; + +package temporal.api.enums.v1; + +option go_package = "go.temporal.io/api/enums/v1;enums"; +option java_package = "io.temporal.api.enums.v1"; +option java_multiple_files = true; +option java_outer_classname = "WorkflowProto"; +option ruby_package = "Temporalio::Api::Enums::V1"; +option csharp_namespace = "Temporalio.Api.Enums.V1"; + +// Defines how new runs of a workflow with a particular ID may or may not be allowed. 
Note that +// it is *never* valid to have two actively running instances of the same workflow id. +enum WorkflowIdReusePolicy { + WORKFLOW_ID_REUSE_POLICY_UNSPECIFIED = 0; + // Allow starting a workflow execution using the same workflow id. + WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE = 1; + // Allow starting a workflow execution using the same workflow id, only when the last + // execution's final state is one of [terminated, cancelled, timed out, failed]. + WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE_FAILED_ONLY = 2; + // Do not permit re-use of the workflow id for this workflow. Future start workflow requests + // could potentially change the policy, allowing re-use of the workflow id. + WORKFLOW_ID_REUSE_POLICY_REJECT_DUPLICATE = 3; + // If a workflow is running using the same workflow ID, terminate it and start a new one. + // If no running workflow, then the behavior is the same as ALLOW_DUPLICATE + WORKFLOW_ID_REUSE_POLICY_TERMINATE_IF_RUNNING = 4; +} + +// Defines how child workflows will react to their parent completing +enum ParentClosePolicy { + PARENT_CLOSE_POLICY_UNSPECIFIED = 0; + // The child workflow will also terminate + PARENT_CLOSE_POLICY_TERMINATE = 1; + // The child workflow will do nothing + PARENT_CLOSE_POLICY_ABANDON = 2; + // Cancellation will be requested of the child workflow + PARENT_CLOSE_POLICY_REQUEST_CANCEL = 3; +} + +enum ContinueAsNewInitiator { + CONTINUE_AS_NEW_INITIATOR_UNSPECIFIED = 0; + // The workflow itself requested to continue as new + CONTINUE_AS_NEW_INITIATOR_WORKFLOW = 1; + // The workflow continued as new because it is retrying + CONTINUE_AS_NEW_INITIATOR_RETRY = 2; + // The workflow continued as new because cron has triggered a new execution + CONTINUE_AS_NEW_INITIATOR_CRON_SCHEDULE = 3; +} + +// (-- api-linter: core::0216::synonyms=disabled +// aip.dev/not-precedent: There is WorkflowExecutionState already in another package. 
--) +enum WorkflowExecutionStatus { + WORKFLOW_EXECUTION_STATUS_UNSPECIFIED = 0; + // Value 1 is hardcoded in SQL persistence. + WORKFLOW_EXECUTION_STATUS_RUNNING = 1; + WORKFLOW_EXECUTION_STATUS_COMPLETED = 2; + WORKFLOW_EXECUTION_STATUS_FAILED = 3; + WORKFLOW_EXECUTION_STATUS_CANCELED = 4; + WORKFLOW_EXECUTION_STATUS_TERMINATED = 5; + WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW = 6; + WORKFLOW_EXECUTION_STATUS_TIMED_OUT = 7; +} + +enum PendingActivityState { + PENDING_ACTIVITY_STATE_UNSPECIFIED = 0; + PENDING_ACTIVITY_STATE_SCHEDULED = 1; + PENDING_ACTIVITY_STATE_STARTED = 2; + PENDING_ACTIVITY_STATE_CANCEL_REQUESTED = 3; +} + +enum PendingWorkflowTaskState { + PENDING_WORKFLOW_TASK_STATE_UNSPECIFIED = 0; + PENDING_WORKFLOW_TASK_STATE_SCHEDULED = 1; + PENDING_WORKFLOW_TASK_STATE_STARTED = 2; +} + +enum HistoryEventFilterType { + HISTORY_EVENT_FILTER_TYPE_UNSPECIFIED = 0; + HISTORY_EVENT_FILTER_TYPE_ALL_EVENT = 1; + HISTORY_EVENT_FILTER_TYPE_CLOSE_EVENT = 2; +} + +enum RetryState { + RETRY_STATE_UNSPECIFIED = 0; + RETRY_STATE_IN_PROGRESS = 1; + RETRY_STATE_NON_RETRYABLE_FAILURE = 2; + RETRY_STATE_TIMEOUT = 3; + RETRY_STATE_MAXIMUM_ATTEMPTS_REACHED = 4; + RETRY_STATE_RETRY_POLICY_NOT_SET = 5; + RETRY_STATE_INTERNAL_SERVER_ERROR = 6; + RETRY_STATE_CANCEL_REQUESTED = 7; +} + +enum TimeoutType { + TIMEOUT_TYPE_UNSPECIFIED = 0; + TIMEOUT_TYPE_START_TO_CLOSE = 1; + TIMEOUT_TYPE_SCHEDULE_TO_START = 2; + TIMEOUT_TYPE_SCHEDULE_TO_CLOSE = 3; + TIMEOUT_TYPE_HEARTBEAT = 4; +} diff --git a/.temporal-api/temporal/api/errordetails/v1/message.proto b/.temporal-api/temporal/api/errordetails/v1/message.proto new file mode 100644 index 0000000..5c9dec6 --- /dev/null +++ b/.temporal-api/temporal/api/errordetails/v1/message.proto @@ -0,0 +1,113 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +syntax = "proto3"; + +// These error details are supplied in google.rpc.Status#details as described in "Google APIs, Error Model" (https://cloud.google.com/apis/design/errors#error_model) +// and extend standard Error Details defined in https://github.com/googleapis/googleapis/blob/master/google/rpc/error_details.proto + +package temporal.api.errordetails.v1; + +option go_package = "go.temporal.io/api/errordetails/v1;errordetails"; +option java_package = "io.temporal.api.errordetails.v1"; +option java_multiple_files = true; +option java_outer_classname = "MessageProto"; +option ruby_package = "Temporalio::Api::ErrorDetails::V1"; +option csharp_namespace = "Temporalio.Api.ErrorDetails.V1"; + +import "temporal/api/common/v1/message.proto"; + +import "temporal/api/enums/v1/failed_cause.proto"; +import "temporal/api/enums/v1/namespace.proto"; + +message NotFoundFailure { + string current_cluster = 1; + string active_cluster = 2; +} + +message WorkflowExecutionAlreadyStartedFailure { + string start_request_id = 1; + string run_id = 2; +} + +message NamespaceNotActiveFailure { + string namespace = 1; + string current_cluster = 2; + string active_cluster = 3; +} + +message NamespaceInvalidStateFailure { + string namespace = 1; + // Current state of the requested namespace. + temporal.api.enums.v1.NamespaceState state = 2; + // Allowed namespace states for requested operation. + // For example NAMESPACE_STATE_DELETED is forbidden for most operations but allowed for DescribeNamespace. 
+ repeated temporal.api.enums.v1.NamespaceState allowed_states = 3; +} + +message NamespaceNotFoundFailure { + string namespace = 1; +} + +message NamespaceAlreadyExistsFailure { +} + +message ClientVersionNotSupportedFailure { + string client_version = 1; + string client_name = 2; + string supported_versions = 3; +} + +message ServerVersionNotSupportedFailure { + string server_version = 1; + string client_supported_server_versions = 2; +} + +message CancellationAlreadyRequestedFailure { +} + +message QueryFailedFailure { +} + +message PermissionDeniedFailure { + string reason = 1; +} + +message ResourceExhaustedFailure { + temporal.api.enums.v1.ResourceExhaustedCause cause = 1; +} + +message SystemWorkflowFailure { + // WorkflowId and RunId of the Temporal system workflow performing the underlying operation. + // Looking up the info of the system workflow run may help identify the issue causing the failure. + temporal.api.common.v1.WorkflowExecution workflow_execution = 1; + // Serialized error returned by the system workflow performing the underlying operation. + string workflow_error = 2; +} + +message WorkflowNotReadyFailure { +} + +message NewerBuildExistsFailure { + // The current default compatible build ID which will receive tasks + string default_build_id = 1; +} diff --git a/.temporal-api/temporal/api/failure/v1/message.proto b/.temporal-api/temporal/api/failure/v1/message.proto new file mode 100644 index 0000000..dcda232 --- /dev/null +++ b/.temporal-api/temporal/api/failure/v1/message.proto @@ -0,0 +1,114 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +syntax = "proto3"; + +package temporal.api.failure.v1; + +option go_package = "go.temporal.io/api/failure/v1;failure"; +option java_package = "io.temporal.api.failure.v1"; +option java_multiple_files = true; +option java_outer_classname = "MessageProto"; +option ruby_package = "Temporalio::Api::Failure::V1"; +option csharp_namespace = "Temporalio.Api.Failure.V1"; + +import "temporal/api/common/v1/message.proto"; +import "temporal/api/enums/v1/workflow.proto"; + +message ApplicationFailureInfo { + string type = 1; + bool non_retryable = 2; + temporal.api.common.v1.Payloads details = 3; +} + +message TimeoutFailureInfo { + temporal.api.enums.v1.TimeoutType timeout_type = 1; + temporal.api.common.v1.Payloads last_heartbeat_details = 2; +} + +message CanceledFailureInfo { + temporal.api.common.v1.Payloads details = 1; +} + +message TerminatedFailureInfo { +} + +message ServerFailureInfo { + bool non_retryable = 1; +} + +message ResetWorkflowFailureInfo { + temporal.api.common.v1.Payloads last_heartbeat_details = 1; +} + +message ActivityFailureInfo { + int64 scheduled_event_id = 1; + int64 started_event_id = 2; + string identity = 3; + temporal.api.common.v1.ActivityType activity_type = 4; + string activity_id = 5; + temporal.api.enums.v1.RetryState retry_state = 6; +} + +message ChildWorkflowExecutionFailureInfo { + string namespace = 1; + temporal.api.common.v1.WorkflowExecution workflow_execution = 2; + temporal.api.common.v1.WorkflowType workflow_type = 3; + int64 initiated_event_id = 4; + int64 started_event_id = 5; + temporal.api.enums.v1.RetryState retry_state = 6; +} + +message Failure { + string message = 1; + // The source this Failure originated in, e.g. TypeScriptSDK / JavaSDK + // In some SDKs this is used to rehydrate the stack trace into an exception object. 
+ string source = 2; + string stack_trace = 3; + // Alternative way to supply `message` and `stack_trace` and possibly other attributes, used for encryption of + // errors originating in user code which might contain sensitive information. + // The `encoded_attributes` Payload could represent any serializable object, e.g. JSON object or a `Failure` proto + // message. + // + // SDK authors: + // - The SDK should provide a default `encodeFailureAttributes` and `decodeFailureAttributes` implementation that: + // - Uses a JSON object to represent `{ message, stack_trace }`. + // - Overwrites the original message with "Encoded failure" to indicate that more information could be extracted. + // - Overwrites the original stack_trace with an empty string. + // - The resulting JSON object is converted to Payload using the default PayloadConverter and should be processed + // by the user-provided PayloadCodec + // + // - If there's demand, we could allow overriding the default SDK implementation to encode other opaque Failure attributes. + // (-- api-linter: core::0203::optional=disabled --) + temporal.api.common.v1.Payload encoded_attributes = 20; + Failure cause = 4; + oneof failure_info { + ApplicationFailureInfo application_failure_info = 5; + TimeoutFailureInfo timeout_failure_info = 6; + CanceledFailureInfo canceled_failure_info = 7; + TerminatedFailureInfo terminated_failure_info = 8; + ServerFailureInfo server_failure_info = 9; + ResetWorkflowFailureInfo reset_workflow_failure_info = 10; + ActivityFailureInfo activity_failure_info = 11; + ChildWorkflowExecutionFailureInfo child_workflow_execution_failure_info = 12; + } +} diff --git a/.temporal-api/temporal/api/filter/v1/message.proto b/.temporal-api/temporal/api/filter/v1/message.proto new file mode 100644 index 0000000..de0af83 --- /dev/null +++ b/.temporal-api/temporal/api/filter/v1/message.proto @@ -0,0 +1,56 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +syntax = "proto3"; + +package temporal.api.filter.v1; + +option go_package = "go.temporal.io/api/filter/v1;filter"; +option java_package = "io.temporal.api.filter.v1"; +option java_multiple_files = true; +option java_outer_classname = "MessageProto"; +option ruby_package = "Temporalio::Api::Filter::V1"; +option csharp_namespace = "Temporalio.Api.Filter.V1"; + +import "google/protobuf/timestamp.proto"; + +import "dependencies/gogoproto/gogo.proto"; + +import "temporal/api/enums/v1/workflow.proto"; + +message WorkflowExecutionFilter { + string workflow_id = 1; + string run_id = 2; +} + +message WorkflowTypeFilter { + string name = 1; +} + +message StartTimeFilter { + google.protobuf.Timestamp earliest_time = 1 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp latest_time = 2 [(gogoproto.stdtime) = true]; +} + +message StatusFilter { + temporal.api.enums.v1.WorkflowExecutionStatus status = 1; +} diff --git a/.temporal-api/temporal/api/history/v1/message.proto b/.temporal-api/temporal/api/history/v1/message.proto new file mode 100644 index 0000000..dc38ffe --- /dev/null +++ b/.temporal-api/temporal/api/history/v1/message.proto @@ -0,0 +1,822 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +syntax = "proto3"; + +package temporal.api.history.v1; + +option go_package = "go.temporal.io/api/history/v1;history"; +option java_package = "io.temporal.api.history.v1"; +option java_multiple_files = true; +option java_outer_classname = "MessageProto"; +option ruby_package = "Temporalio::Api::History::V1"; +option csharp_namespace = "Temporalio.Api.History.V1"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +import "dependencies/gogoproto/gogo.proto"; + +import "temporal/api/enums/v1/event_type.proto"; +import "temporal/api/enums/v1/failed_cause.proto"; +import "temporal/api/enums/v1/workflow.proto"; +import "temporal/api/common/v1/message.proto"; +import "temporal/api/failure/v1/message.proto"; +import "temporal/api/taskqueue/v1/message.proto"; +import "temporal/api/update/v1/message.proto"; +import "temporal/api/workflow/v1/message.proto"; +import "temporal/api/sdk/v1/task_complete_metadata.proto"; + +// Always the first event in workflow history +message WorkflowExecutionStartedEventAttributes { + temporal.api.common.v1.WorkflowType workflow_type = 1; + // If this workflow is a child, the namespace our parent lives in. + // SDKs and UI tools should use `parent_workflow_namespace` field but server must use `parent_workflow_namespace_id` only. 
+ string parent_workflow_namespace = 2; + string parent_workflow_namespace_id = 27; + // Contains information about parent workflow execution that initiated the child workflow these attributes belong to. + // If the workflow these attributes belong to is not a child workflow of any other execution, this field will not be populated. + temporal.api.common.v1.WorkflowExecution parent_workflow_execution = 3; + // EventID of the child execution initiated event in parent workflow + int64 parent_initiated_event_id = 4; + temporal.api.taskqueue.v1.TaskQueue task_queue = 5; + // SDK will deserialize this and provide it as arguments to the workflow function + temporal.api.common.v1.Payloads input = 6; + // Total workflow execution timeout including retries and continue as new. + google.protobuf.Duration workflow_execution_timeout = 7 [(gogoproto.stdduration) = true]; + // Timeout of a single workflow run. + google.protobuf.Duration workflow_run_timeout = 8 [(gogoproto.stdduration) = true]; + // Timeout of a single workflow task. + google.protobuf.Duration workflow_task_timeout = 9 [(gogoproto.stdduration) = true]; + // Run id of the previous workflow which continued-as-new or retried or cron executed into this + // workflow. + string continued_execution_run_id = 10; + temporal.api.enums.v1.ContinueAsNewInitiator initiator = 11; + temporal.api.failure.v1.Failure continued_failure = 12; + temporal.api.common.v1.Payloads last_completion_result = 13; + // This is the run id when the WorkflowExecutionStarted event was written. + // A workflow reset changes the execution run_id, but preserves this field. + string original_execution_run_id = 14; + // Identity of the client who requested this execution + string identity = 15; + // This is the very first runId along the chain of ContinueAsNew, Retry, Cron and Reset. + // Used to identify a chain. 
+ string first_execution_run_id = 16; + temporal.api.common.v1.RetryPolicy retry_policy = 17; + // Starting at 1, the number of times we have tried to execute this workflow + int32 attempt = 18; + // The absolute time at which the workflow will be timed out. + // This is passed without change to the next run/retry of a workflow. + google.protobuf.Timestamp workflow_execution_expiration_time = 19 [(gogoproto.stdtime) = true]; + // If this workflow runs on a cron schedule, it will appear here + string cron_schedule = 20; + // For a cron workflow, this contains the amount of time between when this iteration of + // the cron workflow was scheduled and when it should run next per its cron_schedule. + google.protobuf.Duration first_workflow_task_backoff = 21 [(gogoproto.stdduration) = true]; + temporal.api.common.v1.Memo memo = 22; + temporal.api.common.v1.SearchAttributes search_attributes = 23; + temporal.api.workflow.v1.ResetPoints prev_auto_reset_points = 24; + temporal.api.common.v1.Header header = 25; + // Version of the child execution initiated event in parent workflow + // It should be used together with parent_initiated_event_id to identify + // a child initiated event for global namespace + int64 parent_initiated_event_version = 26; + // This field is new in 1.21. + string workflow_id = 28; + // If this workflow intends to use anything other than the current overall default version for + // the queue, then we include it here. + temporal.api.common.v1.WorkerVersionStamp source_version_stamp = 29; +} + +message WorkflowExecutionCompletedEventAttributes { + // Serialized result of workflow completion (ie: The return value of the workflow function) + temporal.api.common.v1.Payloads result = 1; + // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with + int64 workflow_task_completed_event_id = 2; + // If another run is started by cron, this contains the new run id. 
+ string new_execution_run_id = 3; +} + +message WorkflowExecutionFailedEventAttributes { + // Serialized result of workflow failure (ex: An exception thrown, or error returned) + temporal.api.failure.v1.Failure failure = 1; + temporal.api.enums.v1.RetryState retry_state = 2; + // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with + int64 workflow_task_completed_event_id = 3; + // If another run is started by cron or retry, this contains the new run id. + string new_execution_run_id = 4; +} + +message WorkflowExecutionTimedOutEventAttributes { + temporal.api.enums.v1.RetryState retry_state = 1; + // If another run is started by cron or retry, this contains the new run id. + string new_execution_run_id = 2; +} + +message WorkflowExecutionContinuedAsNewEventAttributes { + // The run ID of the new workflow started by this continue-as-new + string new_execution_run_id = 1; + temporal.api.common.v1.WorkflowType workflow_type = 2; + temporal.api.taskqueue.v1.TaskQueue task_queue = 3; + temporal.api.common.v1.Payloads input = 4; + // Timeout of a single workflow run. + google.protobuf.Duration workflow_run_timeout = 5 [(gogoproto.stdduration) = true]; + // Timeout of a single workflow task. + google.protobuf.Duration workflow_task_timeout = 6 [(gogoproto.stdduration) = true]; + // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with + int64 workflow_task_completed_event_id = 7; + // TODO: How and is this used? + google.protobuf.Duration backoff_start_interval = 8 [(gogoproto.stdduration) = true]; + temporal.api.enums.v1.ContinueAsNewInitiator initiator = 9; + // TODO: David are these right? + // Deprecated. If a workflow's retry policy would cause a new run to start when the current one + // has failed, this field would be populated with that failure. Now (when supported by server + // and sdk) the final event will be `WORKFLOW_EXECUTION_FAILED` with `new_execution_run_id` set. 
+ temporal.api.failure.v1.Failure failure = 10; + // TODO: Is this the result of *this* workflow as it continued-as-new? + temporal.api.common.v1.Payloads last_completion_result = 11; + temporal.api.common.v1.Header header = 12; + temporal.api.common.v1.Memo memo = 13; + temporal.api.common.v1.SearchAttributes search_attributes = 14; + // If this is set, the workflow executing this command wishes to continue as new using a version + // compatible with the version that this workflow most recently ran on. + bool use_compatible_version = 15; + + // workflow_execution_timeout is omitted as it shouldn't be overridden from within a workflow. +} + +message WorkflowTaskScheduledEventAttributes { + // The task queue this workflow task was enqueued in, which could be a normal or sticky queue + temporal.api.taskqueue.v1.TaskQueue task_queue = 1; + // How long the worker has to process this task once receiving it before it times out + // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: "to" is used to indicate interval. --) + google.protobuf.Duration start_to_close_timeout = 2 [(gogoproto.stdduration) = true]; + // Starting at 1, how many attempts there have been to complete this task + int32 attempt = 3; +} + +message WorkflowTaskStartedEventAttributes { + // The id of the `WORKFLOW_TASK_SCHEDULED` event this task corresponds to + int64 scheduled_event_id = 1; + // Identity of the worker who picked up this task + string identity = 2; + // TODO: ? Appears unused? + string request_id = 3; + // True if this workflow should continue-as-new soon because its history size (in + // either event count or bytes) is getting large. + bool suggest_continue_as_new = 4; + // Total history size in bytes, which the workflow might use to decide when to + // continue-as-new regardless of the suggestion. Note that history event count is + // just the event id of this event, so we don't include it explicitly here. 
+ int64 history_size_bytes = 5; +} + +message WorkflowTaskCompletedEventAttributes { + // The id of the `WORKFLOW_TASK_SCHEDULED` event this task corresponds to + int64 scheduled_event_id = 1; + // The id of the `WORKFLOW_TASK_STARTED` event this task corresponds to + int64 started_event_id = 2; + // Identity of the worker who completed this task + string identity = 3; + // Binary ID of the worker who completed this task + string binary_checksum = 4; + // Version info of the worker who processed this workflow task. If present, the `build_id` field + // within is also used as `binary_checksum`, which may be omitted in that case (it may also be + // populated to preserve compatibility). + temporal.api.common.v1.WorkerVersionStamp worker_version = 5; + // Data the SDK wishes to record for itself, but server need not interpret, and does not + // directly impact workflow state. + temporal.api.sdk.v1.WorkflowTaskCompletedMetadata sdk_metadata = 6; + + // Local usage data sent during workflow task completion and recorded here for posterity + temporal.api.common.v1.MeteringMetadata metering_metadata = 13; +} + +message WorkflowTaskTimedOutEventAttributes { + // The id of the `WORKFLOW_TASK_SCHEDULED` event this task corresponds to + int64 scheduled_event_id = 1; + // The id of the `WORKFLOW_TASK_STARTED` event this task corresponds to + int64 started_event_id = 2; + temporal.api.enums.v1.TimeoutType timeout_type = 3; +} + +message WorkflowTaskFailedEventAttributes { + // The id of the `WORKFLOW_TASK_SCHEDULED` event this task corresponds to + int64 scheduled_event_id = 1; + // The id of the `WORKFLOW_TASK_STARTED` event this task corresponds to + int64 started_event_id = 2; + temporal.api.enums.v1.WorkflowTaskFailedCause cause = 3; + // The failure details + temporal.api.failure.v1.Failure failure = 4; + // If a worker explicitly failed this task, its identity. TODO: What is this set to if server fails the task? 
+ string identity = 5; + // The original run id of the workflow. For reset workflow. + string base_run_id = 6; + // If the workflow is being reset, the new run id. + string new_run_id = 7; + // TODO: ? + int64 fork_event_version = 8; + // DEPRECATED since 1.21 - use `worker_version` instead. + // If a worker explicitly failed this task, its binary id + string binary_checksum = 9; + // Version info of the worker who processed this workflow task. If present, the `build_id` field + // within is also used as `binary_checksum`, which may be omitted in that case (it may also be + // populated to preserve compatibility). + temporal.api.common.v1.WorkerVersionStamp worker_version = 10; +} + +message ActivityTaskScheduledEventAttributes { + // The worker/user assigned identifier for the activity + string activity_id = 1; + temporal.api.common.v1.ActivityType activity_type = 2; + // This used to be a `namespace` field which allowed to schedule activity in another namespace. + reserved 3; + temporal.api.taskqueue.v1.TaskQueue task_queue = 4; + temporal.api.common.v1.Header header = 5; + temporal.api.common.v1.Payloads input = 6; + // Indicates how long the caller is willing to wait for an activity completion. Limits how long + // retries will be attempted. Either this or `start_to_close_timeout` must be specified. + // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: "to" is used to indicate interval. --) + google.protobuf.Duration schedule_to_close_timeout = 7 [(gogoproto.stdduration) = true]; + // Limits time an activity task can stay in a task queue before a worker picks it up. This + // timeout is always non retryable, as all a retry would achieve is to put it back into the same + // queue. Defaults to `schedule_to_close_timeout` or workflow execution timeout if not + // specified. + // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: "to" is used to indicate interval. 
--) + google.protobuf.Duration schedule_to_start_timeout = 8 [(gogoproto.stdduration) = true]; + // Maximum time an activity is allowed to execute after being picked up by a worker. This + // timeout is always retryable. Either this or `schedule_to_close_timeout` must be + // specified. + // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: "to" is used to indicate interval. --) + google.protobuf.Duration start_to_close_timeout = 9 [(gogoproto.stdduration) = true]; + // Maximum permitted time between successful worker heartbeats. + google.protobuf.Duration heartbeat_timeout = 10 [(gogoproto.stdduration) = true]; + // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with + int64 workflow_task_completed_event_id = 11; + // Activities are assigned a default retry policy controlled by the service's dynamic + // configuration. Retries will happen up to `schedule_to_close_timeout`. To disable retries set + // retry_policy.maximum_attempts to 1. + temporal.api.common.v1.RetryPolicy retry_policy = 12; + // If this is set, the workflow executing this command wishes to start the activity using + // a version compatible with the version that this workflow most recently ran on, if such + // behavior is possible. + bool use_compatible_version = 13; +} + +message ActivityTaskStartedEventAttributes { + // The id of the `ACTIVITY_TASK_SCHEDULED` event this task corresponds to + int64 scheduled_event_id = 1; + // id of the worker that picked up this task + string identity = 2; + // TODO ?? + string request_id = 3; + // Starting at 1, the number of times this task has been attempted + int32 attempt = 4; + // Will be set to the most recent failure details, if this task has previously failed and then + // been retried. + temporal.api.failure.v1.Failure last_failure = 5; +} + +message ActivityTaskCompletedEventAttributes { + // Serialized results of the activity. 
IE: The return value of the activity function + temporal.api.common.v1.Payloads result = 1; + // The id of the `ACTIVITY_TASK_SCHEDULED` event this completion corresponds to + int64 scheduled_event_id = 2; + // The id of the `ACTIVITY_TASK_STARTED` event this completion corresponds to + int64 started_event_id = 3; + // id of the worker that completed this task + string identity = 4; + // Version info of the worker who processed this workflow task. + temporal.api.common.v1.WorkerVersionStamp worker_version = 5; +} + +message ActivityTaskFailedEventAttributes { + // Failure details + temporal.api.failure.v1.Failure failure = 1; + // The id of the `ACTIVITY_TASK_SCHEDULED` event this failure corresponds to + int64 scheduled_event_id = 2; + // The id of the `ACTIVITY_TASK_STARTED` event this failure corresponds to + int64 started_event_id = 3; + // id of the worker that failed this task + string identity = 4; + temporal.api.enums.v1.RetryState retry_state = 5; + // Version info of the worker who processed this workflow task. + temporal.api.common.v1.WorkerVersionStamp worker_version = 6; +} + +message ActivityTaskTimedOutEventAttributes { + // If this activity had failed, was retried, and then timed out, that failure is stored as the + // `cause` in here. 
+ temporal.api.failure.v1.Failure failure = 1; + // The id of the `ACTIVITY_TASK_SCHEDULED` event this timeout corresponds to + int64 scheduled_event_id = 2; + // The id of the `ACTIVITY_TASK_STARTED` event this timeout corresponds to + int64 started_event_id = 3; + temporal.api.enums.v1.RetryState retry_state = 4; +} + +message ActivityTaskCancelRequestedEventAttributes { + // The id of the `ACTIVITY_TASK_SCHEDULED` event this cancel request corresponds to + int64 scheduled_event_id = 1; + // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with + int64 workflow_task_completed_event_id = 2; +} + +message ActivityTaskCanceledEventAttributes { + // Additional information that the activity reported upon confirming cancellation + temporal.api.common.v1.Payloads details = 1; + // id of the most recent `ACTIVITY_TASK_CANCEL_REQUESTED` event which refers to the same + // activity + int64 latest_cancel_requested_event_id = 2; + // The id of the `ACTIVITY_TASK_SCHEDULED` event this cancel confirmation corresponds to + int64 scheduled_event_id = 3; + // The id of the `ACTIVITY_TASK_STARTED` event this cancel confirmation corresponds to + int64 started_event_id = 4; + // id of the worker who canceled this activity + string identity = 5; + // Version info of the worker who processed this workflow task. + temporal.api.common.v1.WorkerVersionStamp worker_version = 6; +} + +message TimerStartedEventAttributes { + // The worker/user assigned id for this timer + string timer_id = 1; + // How long until this timer fires + // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: "to" is used to indicate interval. 
--) + google.protobuf.Duration start_to_fire_timeout = 2 [(gogoproto.stdduration) = true]; + // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with + int64 workflow_task_completed_event_id = 3; +} + +message TimerFiredEventAttributes { + // Will match the `timer_id` from `TIMER_STARTED` event for this timer + string timer_id = 1; + // The id of the `TIMER_STARTED` event itself + int64 started_event_id = 2; +} + +message TimerCanceledEventAttributes { + // Will match the `timer_id` from `TIMER_STARTED` event for this timer + string timer_id = 1; + // The id of the `TIMER_STARTED` event itself + int64 started_event_id = 2; + // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with + int64 workflow_task_completed_event_id = 3; + // The id of the worker who requested this cancel + string identity = 4; +} + +message WorkflowExecutionCancelRequestedEventAttributes { + // User provided reason for requesting cancellation + // TODO: shall we create a new field with name "reason" and deprecate this one? + string cause = 1; + // TODO: Is this the ID of the event in the workflow which initiated this cancel, if there was one? + int64 external_initiated_event_id = 2; + temporal.api.common.v1.WorkflowExecution external_workflow_execution = 3; + // id of the worker or client who requested this cancel + string identity = 4; +} + +message WorkflowExecutionCanceledEventAttributes { + // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with + int64 workflow_task_completed_event_id = 1; + temporal.api.common.v1.Payloads details = 2; +} + +message MarkerRecordedEventAttributes { + // Workers use this to identify the "types" of various markers. Ex: Local activity, side effect. 
+ string marker_name = 1; + // Serialized information recorded in the marker + map<string, temporal.api.common.v1.Payloads> details = 2; + // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with + int64 workflow_task_completed_event_id = 3; + temporal.api.common.v1.Header header = 4; + // Some uses of markers, like a local activity, could "fail". If they did that is recorded here. + temporal.api.failure.v1.Failure failure = 5; +} + +message WorkflowExecutionSignaledEventAttributes { + // The name/type of the signal to fire + string signal_name = 1; + // Will be deserialized and provided as argument(s) to the signal handler + temporal.api.common.v1.Payloads input = 2; + // id of the worker/client who sent this signal + string identity = 3; + // Headers that were passed by the sender of the signal and copied by temporal + // server into the workflow task. + temporal.api.common.v1.Header header = 4; + // Indicates the signal did not generate a new workflow task when received. + bool skip_generate_workflow_task = 5; +} + +message WorkflowExecutionTerminatedEventAttributes { + // User/client provided reason for termination + string reason = 1; + temporal.api.common.v1.Payloads details = 2; + // id of the client who requested termination + string identity = 3; +} + +message RequestCancelExternalWorkflowExecutionInitiatedEventAttributes { + // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with + int64 workflow_task_completed_event_id = 1; + // The namespace the workflow to be cancelled lives in. + // SDKs and UI tools should use `namespace` field but server must use `namespace_id` only. 
+ string namespace = 2; + string namespace_id = 7; + temporal.api.common.v1.WorkflowExecution workflow_execution = 3; + // Deprecated + string control = 4; + // Workers are expected to set this to true if the workflow they are requesting to cancel is + // a child of the workflow which issued the request + bool child_workflow_only = 5; + // Reason for requesting the cancellation + string reason = 6; +} + +message RequestCancelExternalWorkflowExecutionFailedEventAttributes { + temporal.api.enums.v1.CancelExternalWorkflowExecutionFailedCause cause = 1; + // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with + int64 workflow_task_completed_event_id = 2; + // Namespace of the workflow which failed to cancel. + // SDKs and UI tools should use `namespace` field but server must use `namespace_id` only. + string namespace = 3; + string namespace_id = 7; + temporal.api.common.v1.WorkflowExecution workflow_execution = 4; + // id of the `REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_INITIATED` event this failure + // corresponds to + int64 initiated_event_id = 5; + // Deprecated + string control = 6; +} + +message ExternalWorkflowExecutionCancelRequestedEventAttributes { + // id of the `REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_INITIATED` event this event corresponds + // to + int64 initiated_event_id = 1; + // Namespace of the to-be-cancelled workflow. + // SDKs and UI tools should use `namespace` field but server must use `namespace_id` only. + string namespace = 2; + string namespace_id = 4; + temporal.api.common.v1.WorkflowExecution workflow_execution = 3; +} + +message SignalExternalWorkflowExecutionInitiatedEventAttributes { + // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with + int64 workflow_task_completed_event_id = 1; + // Namespace of the to-be-signalled workflow. + // SDKs and UI tools should use `namespace` field but server must use `namespace_id` only. 
+ string namespace = 2; + string namespace_id = 9; + temporal.api.common.v1.WorkflowExecution workflow_execution = 3; + // name/type of the signal to fire in the external workflow + string signal_name = 4; + // Serialized arguments to provide to the signal handler + temporal.api.common.v1.Payloads input = 5; + // Deprecated + string control = 6; + // Workers are expected to set this to true if the workflow they are requesting to cancel is + // a child of the workflow which issued the request + bool child_workflow_only = 7; + temporal.api.common.v1.Header header = 8; +} + +message SignalExternalWorkflowExecutionFailedEventAttributes { + temporal.api.enums.v1.SignalExternalWorkflowExecutionFailedCause cause = 1; + // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with + int64 workflow_task_completed_event_id = 2; + // Namespace of the workflow which failed the signal. + // SDKs and UI tools should use `namespace` field but server must use `namespace_id` only. + string namespace = 3; + string namespace_id = 7; + temporal.api.common.v1.WorkflowExecution workflow_execution = 4; + int64 initiated_event_id = 5; + // Deprecated + string control = 6; +} + +message ExternalWorkflowExecutionSignaledEventAttributes { + // id of the `SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_INITIATED` event this event corresponds to + int64 initiated_event_id = 1; + // Namespace of the workflow which was signaled. + // SDKs and UI tools should use `namespace` field but server must use `namespace_id` only. 
+ string namespace = 2; + string namespace_id = 5; + temporal.api.common.v1.WorkflowExecution workflow_execution = 3; + // Deprecated + string control = 4; +} + +message UpsertWorkflowSearchAttributesEventAttributes { + // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with + int64 workflow_task_completed_event_id = 1; + temporal.api.common.v1.SearchAttributes search_attributes = 2; +} + +message WorkflowPropertiesModifiedEventAttributes { + // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with + int64 workflow_task_completed_event_id = 1; + // If set, update the workflow memo with the provided values. The values will be merged with + // the existing memo. If the user wants to delete values, a default/empty Payload should be + // used as the value for the key being deleted. + temporal.api.common.v1.Memo upserted_memo = 2; +} + +message StartChildWorkflowExecutionInitiatedEventAttributes { + // Namespace of the child workflow. + // SDKs and UI tools should use `namespace` field but server must use `namespace_id` only. + string namespace = 1; + string namespace_id = 18; + string workflow_id = 2; + temporal.api.common.v1.WorkflowType workflow_type = 3; + temporal.api.taskqueue.v1.TaskQueue task_queue = 4; + temporal.api.common.v1.Payloads input = 5; + // Total workflow execution timeout including retries and continue as new. + google.protobuf.Duration workflow_execution_timeout = 6 [(gogoproto.stdduration) = true]; + // Timeout of a single workflow run. + google.protobuf.Duration workflow_run_timeout = 7 [(gogoproto.stdduration) = true]; + // Timeout of a single workflow task. + google.protobuf.Duration workflow_task_timeout = 8 [(gogoproto.stdduration) = true]; + // Default: PARENT_CLOSE_POLICY_TERMINATE. 
+ temporal.api.enums.v1.ParentClosePolicy parent_close_policy = 9; + // Deprecated + string control = 10; + // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with + int64 workflow_task_completed_event_id = 11; + // Default: WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE. + temporal.api.enums.v1.WorkflowIdReusePolicy workflow_id_reuse_policy = 12; + temporal.api.common.v1.RetryPolicy retry_policy = 13; + // If this child runs on a cron schedule, it will appear here + string cron_schedule = 14; + temporal.api.common.v1.Header header = 15; + temporal.api.common.v1.Memo memo = 16; + temporal.api.common.v1.SearchAttributes search_attributes = 17; + // If this is set, the workflow executing this command wishes to start the child workflow using + // a version compatible with the version that this workflow most recently ran on, if such + // behavior is possible. + bool use_compatible_version = 19; +} + +message StartChildWorkflowExecutionFailedEventAttributes { + // Namespace of the child workflow. + // SDKs and UI tools should use `namespace` field but server must use `namespace_id` only. + string namespace = 1; + string namespace_id = 8; + string workflow_id = 2; + temporal.api.common.v1.WorkflowType workflow_type = 3; + temporal.api.enums.v1.StartChildWorkflowExecutionFailedCause cause = 4; + // Deprecated + string control = 5; + // Id of the `START_CHILD_WORKFLOW_EXECUTION_INITIATED` event which this event corresponds to + int64 initiated_event_id = 6; + // The `WORKFLOW_TASK_COMPLETED` event which this command was reported with + int64 workflow_task_completed_event_id = 7; +} + +message ChildWorkflowExecutionStartedEventAttributes { + // Namespace of the child workflow. + // SDKs and UI tools should use `namespace` field but server must use `namespace_id` only. 
+ string namespace = 1; + string namespace_id = 6; + // Id of the `START_CHILD_WORKFLOW_EXECUTION_INITIATED` event which this event corresponds to + int64 initiated_event_id = 2; + temporal.api.common.v1.WorkflowExecution workflow_execution = 3; + temporal.api.common.v1.WorkflowType workflow_type = 4; + temporal.api.common.v1.Header header = 5; +} + +message ChildWorkflowExecutionCompletedEventAttributes { + temporal.api.common.v1.Payloads result = 1; + // Namespace of the child workflow. + // SDKs and UI tools should use `namespace` field but server must use `namespace_id` only. + string namespace = 2; + string namespace_id = 7; + temporal.api.common.v1.WorkflowExecution workflow_execution = 3; + temporal.api.common.v1.WorkflowType workflow_type = 4; + // Id of the `START_CHILD_WORKFLOW_EXECUTION_INITIATED` event which this event corresponds to + int64 initiated_event_id = 5; + // Id of the `CHILD_WORKFLOW_EXECUTION_STARTED` event which this event corresponds to + int64 started_event_id = 6; +} + +message ChildWorkflowExecutionFailedEventAttributes { + temporal.api.failure.v1.Failure failure = 1; + // Namespace of the child workflow. + // SDKs and UI tools should use `namespace` field but server must use `namespace_id` only. + string namespace = 2; + string namespace_id = 8; + temporal.api.common.v1.WorkflowExecution workflow_execution = 3; + temporal.api.common.v1.WorkflowType workflow_type = 4; + // Id of the `START_CHILD_WORKFLOW_EXECUTION_INITIATED` event which this event corresponds to + int64 initiated_event_id = 5; + // Id of the `CHILD_WORKFLOW_EXECUTION_STARTED` event which this event corresponds to + int64 started_event_id = 6; + temporal.api.enums.v1.RetryState retry_state = 7; +} + +message ChildWorkflowExecutionCanceledEventAttributes { + temporal.api.common.v1.Payloads details = 1; + // Namespace of the child workflow. + // SDKs and UI tools should use `namespace` field but server must use `namespace_id` only. 
+ string namespace = 2; + string namespace_id = 7; + temporal.api.common.v1.WorkflowExecution workflow_execution = 3; + temporal.api.common.v1.WorkflowType workflow_type = 4; + // Id of the `START_CHILD_WORKFLOW_EXECUTION_INITIATED` event which this event corresponds to + int64 initiated_event_id = 5; + // Id of the `CHILD_WORKFLOW_EXECUTION_STARTED` event which this event corresponds to + int64 started_event_id = 6; +} + +message ChildWorkflowExecutionTimedOutEventAttributes { + // Namespace of the child workflow. + // SDKs and UI tools should use `namespace` field but server must use `namespace_id` only. + string namespace = 1; + string namespace_id = 7; + temporal.api.common.v1.WorkflowExecution workflow_execution = 2; + temporal.api.common.v1.WorkflowType workflow_type = 3; + // Id of the `START_CHILD_WORKFLOW_EXECUTION_INITIATED` event which this event corresponds to + int64 initiated_event_id = 4; + // Id of the `CHILD_WORKFLOW_EXECUTION_STARTED` event which this event corresponds to + int64 started_event_id = 5; + temporal.api.enums.v1.RetryState retry_state = 6; +} + +message ChildWorkflowExecutionTerminatedEventAttributes { + // Namespace of the child workflow. + // SDKs and UI tools should use `namespace` field but server must use `namespace_id` only. + string namespace = 1; + string namespace_id = 6; + temporal.api.common.v1.WorkflowExecution workflow_execution = 2; + temporal.api.common.v1.WorkflowType workflow_type = 3; + // Id of the `START_CHILD_WORKFLOW_EXECUTION_INITIATED` event which this event corresponds to + int64 initiated_event_id = 4; + // Id of the `CHILD_WORKFLOW_EXECUTION_STARTED` event which this event corresponds to + int64 started_event_id = 5; +} + +message WorkflowPropertiesModifiedExternallyEventAttributes { + // If set to a nonempty string, future workflow tasks for this workflow shall be dispatched on + // the provided queue. + string new_task_queue = 1; + // If set, update the workflow task timeout to this value. 
+ google.protobuf.Duration new_workflow_task_timeout = 2 [(gogoproto.stdduration) = true]; + // If set, update the workflow run timeout to this value. May be set to 0 for no timeout. + google.protobuf.Duration new_workflow_run_timeout = 3 [(gogoproto.stdduration) = true]; + // If set, update the workflow execution timeout to this value. May be set to 0 for no timeout. + google.protobuf.Duration new_workflow_execution_timeout = 4 [(gogoproto.stdduration) = true]; + // If set, update the workflow memo with the provided values. The values will be merged with + // the existing memo. If the user wants to delete values, a default/empty Payload should be + // used as the value for the key being deleted. + temporal.api.common.v1.Memo upserted_memo = 5; +} + +message ActivityPropertiesModifiedExternallyEventAttributes { + // The id of the `ACTIVITY_TASK_SCHEDULED` event this modification corresponds to. + int64 scheduled_event_id = 1; + // If set, update the retry policy of the activity, replacing it with the specified one. + // The number of attempts at the activity is preserved. + temporal.api.common.v1.RetryPolicy new_retry_policy = 2; +} + +message WorkflowExecutionUpdateAcceptedEventAttributes { + // The instance ID of the update protocol that generated this event. + string protocol_instance_id = 1; + // The message ID of the original request message that initiated this + // update. Needed so that the worker can recreate and deliver that same + // message as part of replay. + string accepted_request_message_id = 2; + // The event ID used to sequence the original request message. + int64 accepted_request_sequencing_event_id = 3; + // The message payload of the original request message that initiated this + // update. + temporal.api.update.v1.Request accepted_request = 4; +} + +message WorkflowExecutionUpdateCompletedEventAttributes { + // The metadata about this update. + temporal.api.update.v1.Meta meta = 1; + + // The event ID indicating the acceptance of this update. 
+ int64 accepted_event_id = 3; + + // The outcome of executing the workflow update function. + temporal.api.update.v1.Outcome outcome = 2; +} + +message WorkflowExecutionUpdateRejectedEventAttributes { + // The instance ID of the update protocol that generated this event. + string protocol_instance_id = 1; + // The message ID of the original request message that initiated this + // update. Needed so that the worker can recreate and deliver that same + // message as part of replay. + string rejected_request_message_id = 2; + // The event ID used to sequence the original request message. + int64 rejected_request_sequencing_event_id = 3; + // The message payload of the original request message that initiated this + // update. + temporal.api.update.v1.Request rejected_request = 4; + // The cause of rejection. + temporal.api.failure.v1.Failure failure = 5; +} + + +// History events are the method by which Temporal SDKs advance (or recreate) workflow state. +// See the `EventType` enum for more info about what each event is for. +message HistoryEvent { + // Monotonically increasing event number, starts at 1. + int64 event_id = 1; + google.protobuf.Timestamp event_time = 2 [(gogoproto.stdtime) = true]; + temporal.api.enums.v1.EventType event_type = 3; + // TODO: What is this? Appears unused by SDKs + int64 version = 4; + // TODO: What is this? Appears unused by SDKs + int64 task_id = 5; + // Set to true when the SDK may ignore the event as it does not impact workflow state or + // information in any way that the SDK need be concerned with. If an SDK encounters an event + // type which it does not understand, it must error unless this is true. If it is true, it's + // acceptable for the event type and/or attributes to be uninterpretable. + bool worker_may_ignore = 300; + // The event details. The type must match that in `event_type`. 
+ oneof attributes { + WorkflowExecutionStartedEventAttributes workflow_execution_started_event_attributes = 6; + WorkflowExecutionCompletedEventAttributes workflow_execution_completed_event_attributes = 7; + WorkflowExecutionFailedEventAttributes workflow_execution_failed_event_attributes = 8; + WorkflowExecutionTimedOutEventAttributes workflow_execution_timed_out_event_attributes = 9; + WorkflowTaskScheduledEventAttributes workflow_task_scheduled_event_attributes = 10; + WorkflowTaskStartedEventAttributes workflow_task_started_event_attributes = 11; + WorkflowTaskCompletedEventAttributes workflow_task_completed_event_attributes = 12; + WorkflowTaskTimedOutEventAttributes workflow_task_timed_out_event_attributes = 13; + WorkflowTaskFailedEventAttributes workflow_task_failed_event_attributes = 14; + ActivityTaskScheduledEventAttributes activity_task_scheduled_event_attributes = 15; + ActivityTaskStartedEventAttributes activity_task_started_event_attributes = 16; + ActivityTaskCompletedEventAttributes activity_task_completed_event_attributes = 17; + ActivityTaskFailedEventAttributes activity_task_failed_event_attributes = 18; + ActivityTaskTimedOutEventAttributes activity_task_timed_out_event_attributes = 19; + TimerStartedEventAttributes timer_started_event_attributes = 20; + TimerFiredEventAttributes timer_fired_event_attributes = 21; + ActivityTaskCancelRequestedEventAttributes activity_task_cancel_requested_event_attributes = 22; + ActivityTaskCanceledEventAttributes activity_task_canceled_event_attributes = 23; + TimerCanceledEventAttributes timer_canceled_event_attributes = 24; + MarkerRecordedEventAttributes marker_recorded_event_attributes = 25; + WorkflowExecutionSignaledEventAttributes workflow_execution_signaled_event_attributes = 26; + WorkflowExecutionTerminatedEventAttributes workflow_execution_terminated_event_attributes = 27; + WorkflowExecutionCancelRequestedEventAttributes workflow_execution_cancel_requested_event_attributes = 28; + 
WorkflowExecutionCanceledEventAttributes workflow_execution_canceled_event_attributes = 29; + RequestCancelExternalWorkflowExecutionInitiatedEventAttributes request_cancel_external_workflow_execution_initiated_event_attributes = 30; + RequestCancelExternalWorkflowExecutionFailedEventAttributes request_cancel_external_workflow_execution_failed_event_attributes = 31; + ExternalWorkflowExecutionCancelRequestedEventAttributes external_workflow_execution_cancel_requested_event_attributes = 32; + WorkflowExecutionContinuedAsNewEventAttributes workflow_execution_continued_as_new_event_attributes = 33; + StartChildWorkflowExecutionInitiatedEventAttributes start_child_workflow_execution_initiated_event_attributes = 34; + StartChildWorkflowExecutionFailedEventAttributes start_child_workflow_execution_failed_event_attributes = 35; + ChildWorkflowExecutionStartedEventAttributes child_workflow_execution_started_event_attributes = 36; + ChildWorkflowExecutionCompletedEventAttributes child_workflow_execution_completed_event_attributes = 37; + ChildWorkflowExecutionFailedEventAttributes child_workflow_execution_failed_event_attributes = 38; + ChildWorkflowExecutionCanceledEventAttributes child_workflow_execution_canceled_event_attributes = 39; + ChildWorkflowExecutionTimedOutEventAttributes child_workflow_execution_timed_out_event_attributes = 40; + ChildWorkflowExecutionTerminatedEventAttributes child_workflow_execution_terminated_event_attributes = 41; + SignalExternalWorkflowExecutionInitiatedEventAttributes signal_external_workflow_execution_initiated_event_attributes = 42; + SignalExternalWorkflowExecutionFailedEventAttributes signal_external_workflow_execution_failed_event_attributes = 43; + ExternalWorkflowExecutionSignaledEventAttributes external_workflow_execution_signaled_event_attributes = 44; + UpsertWorkflowSearchAttributesEventAttributes upsert_workflow_search_attributes_event_attributes = 45; + WorkflowExecutionUpdateAcceptedEventAttributes 
workflow_execution_update_accepted_event_attributes = 46; + WorkflowExecutionUpdateRejectedEventAttributes workflow_execution_update_rejected_event_attributes = 47; + WorkflowExecutionUpdateCompletedEventAttributes workflow_execution_update_completed_event_attributes = 48; + WorkflowPropertiesModifiedExternallyEventAttributes workflow_properties_modified_externally_event_attributes = 49; + ActivityPropertiesModifiedExternallyEventAttributes activity_properties_modified_externally_event_attributes = 50; + WorkflowPropertiesModifiedEventAttributes workflow_properties_modified_event_attributes = 51; + } +} + +message History { + repeated HistoryEvent events = 1; +} diff --git a/.temporal-api/temporal/api/namespace/v1/message.proto b/.temporal-api/temporal/api/namespace/v1/message.proto new file mode 100644 index 0000000..009b480 --- /dev/null +++ b/.temporal-api/temporal/api/namespace/v1/message.proto @@ -0,0 +1,99 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +syntax = "proto3"; + +package temporal.api.namespace.v1; + +option go_package = "go.temporal.io/api/namespace/v1;namespace"; +option java_package = "io.temporal.api.namespace.v1"; +option java_multiple_files = true; +option java_outer_classname = "MessageProto"; +option ruby_package = "Temporalio::Api::Namespace::V1"; +option csharp_namespace = "Temporalio.Api.Namespace.V1"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +import "dependencies/gogoproto/gogo.proto"; + +import "temporal/api/enums/v1/namespace.proto"; + + +message NamespaceInfo { + string name = 1; + temporal.api.enums.v1.NamespaceState state = 2; + string description = 3; + string owner_email = 4; + // A key-value map for any customized purpose. + map<string, string> data = 5; + string id = 6; + + // Whether scheduled workflows are supported on this namespace. This is only needed + // temporarily while the feature is experimental, so we can give it a high tag. + bool supports_schedules = 100; +} + +message NamespaceConfig { + google.protobuf.Duration workflow_execution_retention_ttl = 1 [(gogoproto.stdduration) = true]; + BadBinaries bad_binaries = 2; + // If unspecified (ARCHIVAL_STATE_UNSPECIFIED) then default server configuration is used. + temporal.api.enums.v1.ArchivalState history_archival_state = 3; + string history_archival_uri = 4; + // If unspecified (ARCHIVAL_STATE_UNSPECIFIED) then default server configuration is used. + temporal.api.enums.v1.ArchivalState visibility_archival_state = 5; + string visibility_archival_uri = 6; + // Map from field name to alias. 
+ map<string, string> custom_search_attribute_aliases = 7; +} + +message BadBinaries { + map<string, BadBinaryInfo> binaries = 1; +} + +message BadBinaryInfo { + string reason = 1; + string operator = 2; + google.protobuf.Timestamp create_time = 3 [(gogoproto.stdtime) = true]; +} + +message UpdateNamespaceInfo { + string description = 1; + string owner_email = 2; + // A key-value map for any customized purpose. + // If data already exists on the namespace, + // this will merge with the existing key values. + map<string, string> data = 3; + // New namespace state, server will reject if transition is not allowed. + // Allowed transitions are: + // Registered -> [ Deleted | Deprecated | Handover ] + // Handover -> [ Registered ] + // Default is NAMESPACE_STATE_UNSPECIFIED which is do not change state. + temporal.api.enums.v1.NamespaceState state = 4; +} + +message NamespaceFilter { + // By default namespaces in NAMESPACE_STATE_DELETED state are not included. + // Setting include_deleted to true will include deleted namespaces. + // Note: Namespace is in NAMESPACE_STATE_DELETED state when it was deleted from the system but associated data is not deleted yet. + bool include_deleted = 1; +} diff --git a/.temporal-api/temporal/api/operatorservice/v1/request_response.proto b/.temporal-api/temporal/api/operatorservice/v1/request_response.proto new file mode 100644 index 0000000..abe70f7 --- /dev/null +++ b/.temporal-api/temporal/api/operatorservice/v1/request_response.proto @@ -0,0 +1,126 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +syntax = "proto3"; + +package temporal.api.operatorservice.v1; + +option go_package = "go.temporal.io/api/operatorservice/v1;operatorservice"; +option java_package = "io.temporal.api.operatorservice.v1"; +option java_multiple_files = true; +option java_outer_classname = "RequestResponseProto"; +option ruby_package = "Temporalio::Api::OperatorService::V1"; +option csharp_namespace = "Temporalio.Api.OperatorService.V1"; + +import "temporal/api/enums/v1/common.proto"; + +// (-- Search Attribute --) + +message AddSearchAttributesRequest { + // Mapping between search attribute name and its IndexedValueType. + map<string, temporal.api.enums.v1.IndexedValueType> search_attributes = 1; + string namespace = 2; +} + +message AddSearchAttributesResponse { +} + +message RemoveSearchAttributesRequest { + // Search attribute names to delete. 
+ repeated string search_attributes = 1; + string namespace = 2; +} + +message RemoveSearchAttributesResponse { +} + +message ListSearchAttributesRequest { + string namespace = 1; +} + +message ListSearchAttributesResponse { + // Mapping between custom (user-registered) search attribute name to its IndexedValueType. + map<string, temporal.api.enums.v1.IndexedValueType> custom_attributes = 1; + // Mapping between system (predefined) search attribute name to its IndexedValueType. + map<string, temporal.api.enums.v1.IndexedValueType> system_attributes = 2; + // Mapping from the attribute name to the visibility storage native type. + map<string, string> storage_schema = 3; +} + +// (-- api-linter: core::0135::request-unknown-fields=disabled +// aip.dev/not-precedent: DeleteNamespace RPC doesn't follow Google API format. --) +// (-- api-linter: core::0135::request-name-required=disabled +// aip.dev/not-precedent: DeleteNamespace RPC doesn't follow Google API format. --) +message DeleteNamespaceRequest { + // Only one of namespace or namespace_id must be specified to identify namespace. + string namespace = 1; + string namespace_id = 2; +} + +message DeleteNamespaceResponse { + // Temporary namespace name that is used during reclaim resources step. + string deleted_namespace = 1; +} + +message AddOrUpdateRemoteClusterRequest { + // Frontend Address is a cross cluster accessible address. + string frontend_address = 1; + // Flag to enable / disable the cross cluster connection. + bool enable_remote_cluster_connection = 2; +} + +message AddOrUpdateRemoteClusterResponse { +} + +message RemoveRemoteClusterRequest { + // Remote cluster name to be removed. + string cluster_name = 1; +} + +message RemoveRemoteClusterResponse { +} + +message ListClustersRequest { + int32 page_size = 1; + bytes next_page_token = 2; +} + +message ListClustersResponse { + // List of all cluster information + repeated ClusterMetadata clusters = 1; + bytes next_page_token = 4; +} + +message ClusterMetadata { + // Name of the cluster name. + string cluster_name = 1; + // Id of the cluster. 
+ string cluster_id = 2; + // Cluster accessible address. + string address = 3; + // A unique failover version across all connected clusters. + int64 initial_failover_version = 4; + // History service shard number. + int32 history_shard_count = 5; + // A flag to indicate if a connection is active. + bool is_connection_enabled = 6; +} diff --git a/.temporal-api/temporal/api/operatorservice/v1/service.proto b/.temporal-api/temporal/api/operatorservice/v1/service.proto new file mode 100644 index 0000000..0dfd3c4 --- /dev/null +++ b/.temporal-api/temporal/api/operatorservice/v1/service.proto @@ -0,0 +1,84 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +syntax = "proto3"; + +package temporal.api.operatorservice.v1; + +option go_package = "go.temporal.io/api/operatorservice/v1;operatorservice"; +option java_package = "io.temporal.api.operatorservice.v1"; +option java_multiple_files = true; +option java_outer_classname = "ServiceProto"; +option ruby_package = "Temporalio::Api::OperatorService::V1"; +option csharp_namespace = "Temporalio.Api.OperatorService.V1"; + + +import "temporal/api/operatorservice/v1/request_response.proto"; +import "google/api/annotations.proto"; + +// OperatorService API defines how Temporal SDKs and other clients interact with the Temporal server +// to perform administrative functions like registering a search attribute or a namespace. +// APIs in this file could be not compatible with Temporal Cloud, hence it's usage in SDKs should be limited by +// designated APIs that clearly state that they shouldn't be used by the main Application (Workflows & Activities) framework. +service OperatorService { + // (-- Search Attribute --) + + // AddSearchAttributes add custom search attributes. + // + // Returns ALREADY_EXISTS status code if a Search Attribute with any of the specified names already exists + // Returns INTERNAL status code with temporal.api.errordetails.v1.SystemWorkflowFailure in Error Details if registration process fails, + rpc AddSearchAttributes (AddSearchAttributesRequest) returns (AddSearchAttributesResponse) { + } + + // RemoveSearchAttributes removes custom search attributes. + // + // Returns NOT_FOUND status code if a Search Attribute with any of the specified names is not registered + rpc RemoveSearchAttributes (RemoveSearchAttributesRequest) returns (RemoveSearchAttributesResponse) { + } + + // ListSearchAttributes returns comprehensive information about search attributes. 
+ rpc ListSearchAttributes (ListSearchAttributesRequest) returns (ListSearchAttributesResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/search-attributes", + }; + } + + // DeleteNamespace synchronously deletes a namespace and asynchronously reclaims all namespace resources. + // (-- api-linter: core::0135::method-signature=disabled + // aip.dev/not-precedent: DeleteNamespace RPC doesn't follow Google API format. --) + // (-- api-linter: core::0135::response-message-name=disabled + // aip.dev/not-precedent: DeleteNamespace RPC doesn't follow Google API format. --) + rpc DeleteNamespace (DeleteNamespaceRequest) returns (DeleteNamespaceResponse) { + } + + // AddOrUpdateRemoteCluster adds or updates remote cluster. + rpc AddOrUpdateRemoteCluster(AddOrUpdateRemoteClusterRequest) returns (AddOrUpdateRemoteClusterResponse) { + } + + // RemoveRemoteCluster removes remote cluster. + rpc RemoveRemoteCluster(RemoveRemoteClusterRequest) returns (RemoveRemoteClusterResponse) { + } + + // ListClusters returns information about Temporal clusters. + rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { + } +} diff --git a/.temporal-api/temporal/api/protocol/v1/message.proto b/.temporal-api/temporal/api/protocol/v1/message.proto new file mode 100644 index 0000000..31fc90f --- /dev/null +++ b/.temporal-api/temporal/api/protocol/v1/message.proto @@ -0,0 +1,57 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +syntax = "proto3"; + +package temporal.api.protocol.v1; + +option go_package = "go.temporal.io/api/protocol/v1;protocol"; +option java_package = "io.temporal.api.protocol.v1"; +option java_multiple_files = true; +option java_outer_classname = "MessageProto"; +option ruby_package = "Temporalio::Api::Protocol::V1"; +option csharp_namespace = "Temporalio.Api.Protocol.V1"; + +import "google/protobuf/any.proto"; + +// (-- api-linter: core::0146::any=disabled +// aip.dev/not-precedent: We want runtime extensibility for the body field --) +message Message { + // An ID for this specific message. + string id = 1; + + // Identifies the specific instance of a protocol to which this message + // belongs. + string protocol_instance_id = 2; + + // The event ID or command ID after which this message can be delivered. 
The + // effects of history up to and including this event ID should be visible to + // the code that handles this message. Omit to opt out of sequencing. + oneof sequencing_id { + int64 event_id = 3; + int64 command_index = 4; + }; + + // The opaque data carried by this message. The protocol type can be + // extracted from the package name of the message carried inside the Any. + google.protobuf.Any body = 5; +} diff --git a/.temporal-api/temporal/api/query/v1/message.proto b/.temporal-api/temporal/api/query/v1/message.proto new file mode 100644 index 0000000..d3a4786 --- /dev/null +++ b/.temporal-api/temporal/api/query/v1/message.proto @@ -0,0 +1,61 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +syntax = "proto3"; + +package temporal.api.query.v1; + +option go_package = "go.temporal.io/api/query/v1;query"; +option java_package = "io.temporal.api.query.v1"; +option java_multiple_files = true; +option java_outer_classname = "MessageProto"; +option ruby_package = "Temporalio::Api::Query::V1"; +option csharp_namespace = "Temporalio.Api.Query.V1"; + +import "temporal/api/enums/v1/query.proto"; +import "temporal/api/enums/v1/workflow.proto"; +import "temporal/api/common/v1/message.proto"; + +// See https://docs.temporal.io/docs/concepts/queries/ +message WorkflowQuery { + // The workflow-author-defined identifier of the query. Typically a function name. + string query_type = 1; + // Serialized arguments that will be provided to the query handler. + temporal.api.common.v1.Payloads query_args = 2; + // Headers that were passed by the caller of the query and copied by temporal + // server into the workflow task. + temporal.api.common.v1.Header header = 3; +} + +// Answer to a `WorkflowQuery` +message WorkflowQueryResult { + // Did the query succeed or fail? + temporal.api.enums.v1.QueryResultType result_type = 1; + // Set when the query succeeds with the results + temporal.api.common.v1.Payloads answer = 2; + // Mutually exclusive with `answer`. Set when the query fails. + string error_message = 3; +} + +message QueryRejected { + temporal.api.enums.v1.WorkflowExecutionStatus status = 1; +} diff --git a/.temporal-api/temporal/api/replication/v1/message.proto b/.temporal-api/temporal/api/replication/v1/message.proto new file mode 100644 index 0000000..3da7fbc --- /dev/null +++ b/.temporal-api/temporal/api/replication/v1/message.proto @@ -0,0 +1,55 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +syntax = "proto3"; + +package temporal.api.replication.v1; + +option go_package = "go.temporal.io/api/replication/v1;replication"; +option java_package = "io.temporal.api.replication.v1"; +option java_multiple_files = true; +option java_outer_classname = "MessageProto"; +option ruby_package = "Temporalio::Api::Replication::V1"; +option csharp_namespace = "Temporalio.Api.Replication.V1"; + +import "google/protobuf/timestamp.proto"; + +import "dependencies/gogoproto/gogo.proto"; + +import "temporal/api/enums/v1/namespace.proto"; + +message ClusterReplicationConfig { + string cluster_name = 1; +} + +message NamespaceReplicationConfig { + string active_cluster_name = 1; + repeated ClusterReplicationConfig clusters = 2; + temporal.api.enums.v1.ReplicationState state = 3; +} + +// Represents a historical replication status of a Namespace +message FailoverStatus { + // Timestamp when the Cluster switched to the following failover_version + google.protobuf.Timestamp failover_time = 1 [(gogoproto.stdtime) = true]; + int64 failover_version = 2; +} diff --git a/.temporal-api/temporal/api/schedule/v1/message.proto b/.temporal-api/temporal/api/schedule/v1/message.proto new file mode 100644 index 0000000..197c260 --- /dev/null +++ b/.temporal-api/temporal/api/schedule/v1/message.proto @@ -0,0 +1,397 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +// (-- api-linter: core::0203::optional=disabled +// aip.dev/not-precedent: field_behavior annotation not available in our gogo fork --) +// (-- api-linter: core::0203::input-only=disabled +// aip.dev/not-precedent: field_behavior annotation not available in our gogo fork --) + +syntax = "proto3"; + +package temporal.api.schedule.v1; + +option go_package = "go.temporal.io/api/schedule/v1;schedule"; +option java_package = "io.temporal.api.schedule.v1"; +option java_multiple_files = true; +option java_outer_classname = "MessageProto"; +option ruby_package = "Temporalio::Api::Schedule::V1"; +option csharp_namespace = "Temporalio.Api.Schedule.V1"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +import "dependencies/gogoproto/gogo.proto"; + +import "temporal/api/common/v1/message.proto"; +import "temporal/api/enums/v1/schedule.proto"; +import "temporal/api/workflow/v1/message.proto"; + +// CalendarSpec describes an event specification relative to the calendar, +// similar to a traditional cron specification, but with labeled fields. Each +// field can be one of: +// *: matches always +// x: matches when the field equals x +// x/y : matches when the field equals x+n*y where n is an integer +// x-z: matches when the field is between x and z inclusive +// w,x,y,...: matches when the field is one of the listed values +// Each x, y, z, ... is either a decimal integer, or a month or day of week name +// or abbreviation (in the appropriate fields). +// A timestamp matches if all fields match. +// Note that fields have different default values, for convenience. +// Note that the special case that some cron implementations have for treating +// day_of_month and day_of_week as "or" instead of "and" when both are set is +// not implemented. +// day_of_week can accept 0 or 7 as Sunday +// CalendarSpec gets compiled into StructuredCalendarSpec, which is what will be +// returned if you describe the schedule. 
+message CalendarSpec { + // Expression to match seconds. Default: 0 + string second = 1; + // Expression to match minutes. Default: 0 + string minute = 2; + // Expression to match hours. Default: 0 + string hour = 3; + // Expression to match days of the month. Default: * + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: standard name of field --) + string day_of_month = 4; + // Expression to match months. Default: * + string month = 5; + // Expression to match years. Default: * + string year = 6; + // Expression to match days of the week. Default: * + string day_of_week = 7; + // Free-form comment describing the intention of this spec. + string comment = 8; +} + +// Range represents a set of integer values, used to match fields of a calendar +// time in StructuredCalendarSpec. If end < start, then end is interpreted as +// equal to start. This means you can use a Range with start set to a value, and +// end and step unset (defaulting to 0) to represent a single value. +message Range { + // Start of range (inclusive). + int32 start = 1; + // End of range (inclusive). + int32 end = 2; + // Step (optional, default 1). + int32 step = 3; +} + +// StructuredCalendarSpec describes an event specification relative to the +// calendar, in a form that's easy to work with programmatically. Each field can +// be one or more ranges. +// A timestamp matches if at least one range of each field matches the +// corresponding fields of the timestamp, except for year: if year is missing, +// that means all years match. For all fields besides year, at least one Range +// must be present to match anything. 
+// TODO: add relative-to-end-of-month +// TODO: add nth day-of-week in month +message StructuredCalendarSpec { + // Match seconds (0-59) + repeated Range second = 1; + // Match minutes (0-59) + repeated Range minute = 2; + // Match hours (0-23) + repeated Range hour = 3; + // Match days of the month (1-31) + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: standard name of field --) + repeated Range day_of_month = 4; + // Match months (1-12) + repeated Range month = 5; + // Match years. + repeated Range year = 6; + // Match days of the week (0-6; 0 is Sunday). + repeated Range day_of_week = 7; + // Free-form comment describing the intention of this spec. + string comment = 8; +} + +// IntervalSpec matches times that can be expressed as: +// epoch + n * interval + phase +// where n is an integer. +// phase defaults to zero if missing. interval is required. +// Both interval and phase must be non-negative and are truncated to the nearest +// second before any calculations. +// For example, an interval of 1 hour with phase of zero would match every hour, +// on the hour. The same interval but a phase of 19 minutes would match every +// xx:19:00. An interval of 28 days with phase zero would match +// 2022-02-17T00:00:00Z (among other times). The same interval with a phase of 3 +// days, 5 hours, and 23 minutes would match 2022-02-20T05:23:00Z instead. +message IntervalSpec { + google.protobuf.Duration interval = 1 [(gogoproto.stdduration) = true]; + google.protobuf.Duration phase = 2 [(gogoproto.stdduration) = true]; +} + +// ScheduleSpec is a complete description of a set of absolute timestamps +// (possibly infinite) that an action should occur at. The meaning of a +// ScheduleSpec depends only on its contents and never changes, except that the +// definition of a time zone can change over time (most commonly, when daylight +// saving time policy changes for an area). 
 To create a totally self-contained +// ScheduleSpec, use UTC or include timezone_data. +// +// For input, you can provide zero or more of: structured_calendar, calendar, +// cron_string, interval, and exclude_structured_calendar, and all of them will +// be used (the schedule will take action at the union of all of their times, +// minus the ones that match exclude_structured_calendar). +// +// On input, calendar and cron_string fields will be compiled into +// structured_calendar (and maybe interval and timezone_name), so if you +// Describe a schedule, you'll see only structured_calendar, interval, etc. +// +// If a spec has no matching times after the current time, then the schedule +// will be subject to automatic deletion (after several days). +message ScheduleSpec { + // Calendar-based specifications of times. + repeated StructuredCalendarSpec structured_calendar = 7; + // cron_string holds a traditional cron specification as a string. It + // accepts 5, 6, or 7 fields, separated by spaces, and interprets them the + // same way as CalendarSpec. + // 5 fields: minute, hour, day_of_month, month, day_of_week + // 6 fields: minute, hour, day_of_month, month, day_of_week, year + // 7 fields: second, minute, hour, day_of_month, month, day_of_week, year + // If year is not given, it defaults to *. If second is not given, it + // defaults to 0. + // Shorthands @yearly, @monthly, @weekly, @daily, and @hourly are also + // accepted instead of the 5-7 time fields. + // Optionally, the string can be preceded by CRON_TZ=<time zone name> or + // TZ=<time zone name>, which will get copied to timezone_name. (There must + // not also be a timezone_name present.) + // Optionally "#" followed by a comment can appear at the end of the string. + // Note that the special case that some cron implementations have for + // treating day_of_month and day_of_week as "or" instead of "and" when both + // are set is not implemented. + // @every <interval>[/<phase>] is accepted and gets compiled into an + // IntervalSpec instead. 
<interval> and <phase> should be a decimal integer + // with a unit suffix s, m, h, or d. + repeated string cron_string = 8; + // Calendar-based specifications of times. + repeated CalendarSpec calendar = 1; + // Interval-based specifications of times. + repeated IntervalSpec interval = 2; + // Any timestamps matching any of exclude_* will be skipped. + repeated CalendarSpec exclude_calendar = 3 [deprecated = true]; // use exclude_structured_calendar + repeated StructuredCalendarSpec exclude_structured_calendar = 9; + // If start_time is set, any timestamps before start_time will be skipped. + // (Together, start_time and end_time make an inclusive interval.) + google.protobuf.Timestamp start_time = 4 [(gogoproto.stdtime) = true]; + // If end_time is set, any timestamps after end_time will be skipped. + google.protobuf.Timestamp end_time = 5 [(gogoproto.stdtime) = true]; + // All timestamps will be incremented by a random value from 0 to this + // amount of jitter. Default: 0 + google.protobuf.Duration jitter = 6 [(gogoproto.stdduration) = true]; + + // Time zone to interpret all calendar-based specs in. + // + // If unset, defaults to UTC. We recommend using UTC for your application if + // at all possible, to avoid various surprising properties of time zones. + // + // Time zones may be provided by name, corresponding to names in the IANA + // time zone database (see https://www.iana.org/time-zones). The definition + // will be loaded by the Temporal server from the environment it runs in. + // + // If your application requires more control over the time zone definition + // used, it may pass in a complete definition in the form of a TZif file + // from the time zone database. If present, this will be used instead of + // loading anything from the environment. You are then responsible for + // updating timezone_data when the definition changes. 
+ // + // Calendar spec matching is based on literal matching of the clock time + // with no special handling of DST: if you write a calendar spec that fires + // at 2:30am and specify a time zone that follows DST, that action will not + // be triggered on the day that has no 2:30am. Similarly, an action that + // fires at 1:30am will be triggered twice on the day that has two 1:30s. + // + // Also note that no actions are taken on leap-seconds (e.g. 23:59:60 UTC). + string timezone_name = 10; + bytes timezone_data = 11; +} + +message SchedulePolicies { + // Policy for overlaps. + // Note that this can be changed after a schedule has taken some actions, + // and some changes might produce unintuitive results. In general, the later + // policy overrides the earlier policy. + temporal.api.enums.v1.ScheduleOverlapPolicy overlap_policy = 1; + + // Policy for catchups: + // If the Temporal server misses an action due to one or more components + // being down, and comes back up, the action will be run if the scheduled + // time is within this window from the current time. + // This value defaults to one year, and can't be less than 10 seconds. + google.protobuf.Duration catchup_window = 2 [(gogoproto.stdduration) = true]; + + // If true, and a workflow run fails or times out, turn on "paused". + // This applies after retry policies: the full chain of retries must fail to + // trigger a pause here. + bool pause_on_failure = 3; +} + +message ScheduleAction { + oneof action { + // All fields of NewWorkflowExecutionInfo are valid except for: + // - workflow_id_reuse_policy + // - cron_schedule + // The workflow id of the started workflow may not match this exactly, + // it may have a timestamp appended for uniqueness. + temporal.api.workflow.v1.NewWorkflowExecutionInfo start_workflow = 1; + } +} + +message ScheduleActionResult { + // Time that the action was taken (according to the schedule, including jitter). 
+ google.protobuf.Timestamp schedule_time = 1 [(gogoproto.stdtime) = true]; + + // Time that the action was taken (real time). + google.protobuf.Timestamp actual_time = 2 [(gogoproto.stdtime) = true]; + + // If action was start_workflow: + temporal.api.common.v1.WorkflowExecution start_workflow_result = 11; +} + +message ScheduleState { + // Informative human-readable message with contextual notes, e.g. the reason + // a schedule is paused. The system may overwrite this message on certain + // conditions, e.g. when pause-on-failure happens. + string notes = 1; + + // If true, do not take any actions based on the schedule spec. + bool paused = 2; + + // If limited_actions is true, decrement remaining_actions after each + // action, and do not take any more scheduled actions if remaining_actions + // is zero. Actions may still be taken by explicit request (i.e. trigger + // immediately or backfill). Skipped actions (due to overlap policy) do not + // count against remaining actions. + // If a schedule has no more remaining actions, then the schedule will be + // subject to automatic deletion (after several days). + bool limited_actions = 3; + int64 remaining_actions = 4; +} + +message TriggerImmediatelyRequest { + // If set, override overlap policy for this one request. + temporal.api.enums.v1.ScheduleOverlapPolicy overlap_policy = 1; +} + +message BackfillRequest { + // Time range to evaluate schedule in. Currently, this time range is + // exclusive on start_time and inclusive on end_time. (This is admittedly + // counterintuitive and it may change in the future, so to be safe, use a + // start time strictly before a scheduled time.) Also note that an action + // nominally scheduled in the interval but with jitter that pushes it after + // end_time will not be included. + google.protobuf.Timestamp start_time = 1 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp end_time = 2 [(gogoproto.stdtime) = true]; + // If set, override overlap policy for this request. 
+ temporal.api.enums.v1.ScheduleOverlapPolicy overlap_policy = 3; +} + +message SchedulePatch { + // If set, trigger one action immediately. + TriggerImmediatelyRequest trigger_immediately = 1; + + // If set, runs though the specified time period(s) and takes actions as if that time + // passed by right now, all at once. The overlap policy can be overridden for the + // scope of the backfill. + repeated BackfillRequest backfill_request = 2; + + // If set, change the state to paused or unpaused (respectively) and set the + // notes field to the value of the string. + string pause = 3; + string unpause = 4; +} + +message ScheduleInfo { + // Number of actions taken so far. + int64 action_count = 1; + + // Number of times a scheduled action was skipped due to missing the catchup window. + int64 missed_catchup_window = 2; + + // Number of skipped actions due to overlap. + int64 overlap_skipped = 3; + + // Number of dropped actions due to buffer limit. + int64 buffer_dropped = 10; + + // Number of actions in the buffer. The buffer holds the actions that cannot + // be immediately triggered (due to the overlap policy). These actions can be a result of + // the normal schedule or a backfill. + int64 buffer_size = 11; + + // Currently-running workflows started by this schedule. (There might be + // more than one if the overlap policy allows overlaps.) + // Note that the run_ids in here are the original execution run ids as + // started by the schedule. If the workflows retried, did continue-as-new, + // or were reset, they might still be running but with a different run_id. + repeated temporal.api.common.v1.WorkflowExecution running_workflows = 9; + + // Most recent ten actual action times (including manual triggers). + repeated ScheduleActionResult recent_actions = 4; + + // Next ten scheduled action times. + repeated google.protobuf.Timestamp future_action_times = 5 [(gogoproto.stdtime) = true]; + + // Timestamps of schedule creation and last update. 
+ google.protobuf.Timestamp create_time = 6 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp update_time = 7 [(gogoproto.stdtime) = true]; + + string invalid_schedule_error = 8 [deprecated = true]; +} + +message Schedule { + ScheduleSpec spec = 1; + ScheduleAction action = 2; + SchedulePolicies policies = 3; + ScheduleState state = 4; +} + +// ScheduleListInfo is an abbreviated set of values from Schedule and ScheduleInfo +// that's returned in ListSchedules. +message ScheduleListInfo { + // From spec: + // Some fields are dropped from this copy of spec: timezone_data + ScheduleSpec spec = 1; + + // From action: + // Action is a oneof field, but we need to encode this in JSON and oneof fields don't work + // well with JSON. If action is start_workflow, this is set: + temporal.api.common.v1.WorkflowType workflow_type = 2; + + // From state: + string notes = 3; + bool paused = 4; + + // From info (maybe fewer entries): + repeated ScheduleActionResult recent_actions = 5; + repeated google.protobuf.Timestamp future_action_times = 6 [(gogoproto.stdtime) = true]; +} + +// ScheduleListEntry is returned by ListSchedules. +message ScheduleListEntry { + string schedule_id = 1; + temporal.api.common.v1.Memo memo = 2; + temporal.api.common.v1.SearchAttributes search_attributes = 3; + ScheduleListInfo info = 4; +} diff --git a/.temporal-api/temporal/api/sdk/v1/task_complete_metadata.proto b/.temporal-api/temporal/api/sdk/v1/task_complete_metadata.proto new file mode 100644 index 0000000..8be7b89 --- /dev/null +++ b/.temporal-api/temporal/api/sdk/v1/task_complete_metadata.proto @@ -0,0 +1,76 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +syntax = "proto3"; + +package temporal.api.sdk.v1; + +option go_package = "go.temporal.io/api/sdk/v1;sdk"; +option java_package = "io.temporal.api.sdk.v1"; +option java_multiple_files = true; +option java_outer_classname = "TaskCompleteMetadataProto"; +option ruby_package = "Temporalio::Api::Sdk::V1"; +option csharp_namespace = "Temporalio.Api.Sdk.V1"; + +message WorkflowTaskCompletedMetadata { + // Internal flags used by the core SDK. SDKs using flags must comply with the following behavior: + // + // During replay: + // * If a flag is not recognized (value is too high or not defined), it must fail the workflow + // task. + // * If a flag is recognized, it is stored in a set of used flags for the run. Code checks for + // that flag during and after this WFT are allowed to assume that the flag is present. 
+ // * If a code check for a flag does not find the flag in the set of used flags, it must take + // the branch corresponding to the absence of that flag. + // + // During non-replay execution of new WFTs: + // * The SDK is free to use all flags it knows about. It must record any newly-used (IE: not + // previously recorded) flags when completing the WFT. + // + // SDKs which are too old to even know about this field at all are considered to produce + // undefined behavior if they replay workflows which used this mechanism. + // + // (-- api-linter: core::0141::forbidden-types=disabled + // aip.dev/not-precedent: These really shouldn't have negative values. --) + repeated uint32 core_used_flags = 1; + + // Flags used by the SDK lang. No attempt is made to distinguish between different SDK languages + // here as processing a workflow with a different language than the one which authored it is + // already undefined behavior. See `core_used_patches` for more. + // + // (-- api-linter: core::0141::forbidden-types=disabled + // aip.dev/not-precedent: These really shouldn't have negative values. --) + repeated uint32 lang_used_flags = 2; + + // Name of the SDK that processed the task. This is usually something like "temporal-go" and is + // usually the same as client-name gRPC header. This should only be set if its value changed + // since the last time recorded on the workflow (or be set on the first task). + // + // (-- api-linter: core::0122::name-suffix=disabled + // aip.dev/not-precedent: We're ok with a name suffix here. --) + string sdk_name = 3; + + // Version of the SDK that processed the task. This is usually something like "1.20.0" and is + // usually the same as client-version gRPC header. This should only be set if its value changed + // since the last time recorded on the workflow (or be set on the first task). 
+ string sdk_version = 4; +} \ No newline at end of file diff --git a/.temporal-api/temporal/api/taskqueue/v1/message.proto b/.temporal-api/temporal/api/taskqueue/v1/message.proto new file mode 100644 index 0000000..e8f027d --- /dev/null +++ b/.temporal-api/temporal/api/taskqueue/v1/message.proto @@ -0,0 +1,115 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +syntax = "proto3"; + +package temporal.api.taskqueue.v1; + +option go_package = "go.temporal.io/api/taskqueue/v1;taskqueue"; +option java_package = "io.temporal.api.taskqueue.v1"; +option java_multiple_files = true; +option java_outer_classname = "MessageProto"; +option ruby_package = "Temporalio::Api::TaskQueue::V1"; +option csharp_namespace = "Temporalio.Api.TaskQueue.V1"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +import "dependencies/gogoproto/gogo.proto"; + +import "temporal/api/enums/v1/task_queue.proto"; +import "temporal/api/common/v1/message.proto"; + +// See https://docs.temporal.io/docs/concepts/task-queues/ +message TaskQueue { + string name = 1; + // Default: TASK_QUEUE_KIND_NORMAL. + temporal.api.enums.v1.TaskQueueKind kind = 2; + // Iff kind == TASK_QUEUE_KIND_STICKY, then this field contains the name of + // the normal task queue that the sticky worker is running on. + string normal_name = 3; +} + +// Only applies to activity task queues +message TaskQueueMetadata { + // Allows throttling dispatch of tasks from this queue + google.protobuf.DoubleValue max_tasks_per_second = 1; +} + +message TaskQueueStatus { + int64 backlog_count_hint = 1; + int64 read_level = 2; + int64 ack_level = 3; + double rate_per_second = 4; + TaskIdBlock task_id_block = 5; +} + +message TaskIdBlock { + int64 start_id = 1; + int64 end_id = 2; +} + +message TaskQueuePartitionMetadata { + string key = 1; + string owner_host_name = 2; +} + +message PollerInfo { + google.protobuf.Timestamp last_access_time = 1 [(gogoproto.stdtime) = true]; + string identity = 2; + double rate_per_second = 3; + // If a worker has opted into the worker versioning feature while polling, its capabilities will + // appear here. 
+ temporal.api.common.v1.WorkerVersionCapabilities worker_version_capabilities = 4; +} + +message StickyExecutionAttributes { + TaskQueue worker_task_queue = 1; + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: "to" is used to indicate interval. --) + google.protobuf.Duration schedule_to_start_timeout = 2 [(gogoproto.stdduration) = true]; +} + +// Used by the worker versioning APIs, represents an unordered set of one or more versions which are +// considered to be compatible with each other. Currently the versions are always worker build IDs. +message CompatibleVersionSet { + // All the compatible versions, unordered, except for the last element, which is considered the set "default". + repeated string build_ids = 1; +} + +// Reachability of tasks for a worker on a single task queue. +message TaskQueueReachability { + string task_queue = 1; + // Task reachability for a worker in a single task queue. + // See the TaskReachability docstring for information about each enum variant. + // If reachability is empty, this worker is considered unreachable in this task queue. + repeated temporal.api.enums.v1.TaskReachability reachability = 2; +} + +// Reachability of tasks for a worker by build id, in one or more task queues. +message BuildIdReachability { + // A build id or empty if unversioned. + string build_id = 1; + // Reachability per task queue. + repeated TaskQueueReachability task_queue_reachability = 2; +} diff --git a/.temporal-api/temporal/api/update/v1/message.proto b/.temporal-api/temporal/api/update/v1/message.proto new file mode 100644 index 0000000..db6f9ce --- /dev/null +++ b/.temporal-api/temporal/api/update/v1/message.proto @@ -0,0 +1,111 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +syntax = "proto3"; + +package temporal.api.update.v1; + +option go_package = "go.temporal.io/api/update/v1;update"; +option java_package = "io.temporal.api.update.v1"; +option java_multiple_files = true; +option java_outer_classname = "MessageProto"; +option ruby_package = "Temporalio::Api::Update::V1"; +option csharp_namespace = "Temporalio.Api.Update.V1"; + +import "temporal/api/common/v1/message.proto"; +import "temporal/api/enums/v1/update.proto"; +import "temporal/api/failure/v1/message.proto"; + +// Specifies to the gRPC server how long the client wants an update-related +// RPC call to wait before returning control to the caller. +message WaitPolicy { + + // Indicates the update lifecycle stage that the gRPC call should wait for + // before returning. 
+ temporal.api.enums.v1.UpdateWorkflowExecutionLifecycleStage lifecycle_stage = 1; +} + +// The data needed by a client to refer to a previously invoked workflow +// execution update process. +message UpdateRef { + temporal.api.common.v1.WorkflowExecution workflow_execution = 1; + string update_id = 2; +} + +// The outcome of a workflow update - success or failure. +message Outcome { + oneof value { + temporal.api.common.v1.Payloads success = 1; + temporal.api.failure.v1.Failure failure = 2; + } +} + +// Metadata about a workflow execution update. +message Meta { + // An ID with workflow-scoped uniqueness for this update + string update_id = 1; + + // A string identifying the agent that requested this update. + string identity = 2; +} + +message Input { + // Headers that are passed with the update from the requesting entity. + // These can include things like auth or tracing tokens. + temporal.api.common.v1.Header header = 1; + + // The name of the input handler to invoke on the target workflow + string name = 2; + + // The arguments to pass to the named handler. + temporal.api.common.v1.Payloads args = 3; +} + +// The client request that triggers a workflow execution update +message Request { + Meta meta = 1; + Input input = 2; +} + +// An update protocol message indicating that a workflow execution update has +// been rejected. +message Rejection { + string rejected_request_message_id = 1; + int64 rejected_request_sequencing_event_id = 2; + Request rejected_request = 3; + temporal.api.failure.v1.Failure failure = 4; +} + +// An update protocol message indicating that a workflow execution update has +// been accepted (i.e. passed the worker-side validation phase). +message Acceptance { + string accepted_request_message_id = 1; + int64 accepted_request_sequencing_event_id = 2; + Request accepted_request = 3; +} + +// An update protocol message indicating that a workflow execution update has +// completed with the contained outcome. 
+message Response { + Meta meta = 1; + Outcome outcome = 2; +} diff --git a/.temporal-api/temporal/api/version/v1/message.proto b/.temporal-api/temporal/api/version/v1/message.proto new file mode 100644 index 0000000..7c2d360 --- /dev/null +++ b/.temporal-api/temporal/api/version/v1/message.proto @@ -0,0 +1,59 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +syntax = "proto3"; + +package temporal.api.version.v1; + +option go_package = "go.temporal.io/api/version/v1;version"; +option java_package = "io.temporal.api.version.v1"; +option java_multiple_files = true; +option java_outer_classname = "MessageProto"; +option ruby_package = "Temporalio::Api::Version::V1"; +option csharp_namespace = "Temporalio.Api.Version.V1"; + +import "google/protobuf/timestamp.proto"; +import "dependencies/gogoproto/gogo.proto"; +import "temporal/api/enums/v1/common.proto"; + +// ReleaseInfo contains information about specific version of temporal. +message ReleaseInfo { + string version = 1; + google.protobuf.Timestamp release_time = 2 [(gogoproto.stdtime) = true]; + string notes = 3; +} + +// Alert contains notification and severity. +message Alert { + string message = 1; + temporal.api.enums.v1.Severity severity = 2; +} + +// VersionInfo contains details about current and recommended release versions as well as alerts and upgrade instructions. +message VersionInfo { + ReleaseInfo current = 1; + ReleaseInfo recommended = 2; + string instructions = 3; + repeated Alert alerts = 4; + google.protobuf.Timestamp last_update_time = 5 [(gogoproto.stdtime) = true]; +} + diff --git a/.temporal-api/temporal/api/workflow/v1/message.proto b/.temporal-api/temporal/api/workflow/v1/message.proto new file mode 100644 index 0000000..bb83f40 --- /dev/null +++ b/.temporal-api/temporal/api/workflow/v1/message.proto @@ -0,0 +1,152 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +syntax = "proto3"; + +package temporal.api.workflow.v1; + +option go_package = "go.temporal.io/api/workflow/v1;workflow"; +option java_package = "io.temporal.api.workflow.v1"; +option java_multiple_files = true; +option java_outer_classname = "MessageProto"; +option ruby_package = "Temporalio::Api::Workflow::V1"; +option csharp_namespace = "Temporalio.Api.Workflow.V1"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +import "dependencies/gogoproto/gogo.proto"; + +import "temporal/api/enums/v1/workflow.proto"; +import "temporal/api/common/v1/message.proto"; +import "temporal/api/failure/v1/message.proto"; +import "temporal/api/taskqueue/v1/message.proto"; + +message WorkflowExecutionInfo { + temporal.api.common.v1.WorkflowExecution execution = 1; + temporal.api.common.v1.WorkflowType type = 2; + google.protobuf.Timestamp start_time = 3 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp close_time = 4 [(gogoproto.stdtime) = true]; + temporal.api.enums.v1.WorkflowExecutionStatus status = 5; + int64 history_length = 6; + string parent_namespace_id = 7; + temporal.api.common.v1.WorkflowExecution parent_execution = 8; + google.protobuf.Timestamp execution_time = 9 [(gogoproto.stdtime) = true]; + temporal.api.common.v1.Memo memo = 10; + temporal.api.common.v1.SearchAttributes search_attributes = 11; + ResetPoints auto_reset_points = 12; + string task_queue = 13; + int64 state_transition_count = 14; + int64 history_size_bytes = 15; + // If set, the most recent worker version stamp that appeared in a workflow task completion + temporal.api.common.v1.WorkerVersionStamp most_recent_worker_version_stamp = 16; +} + +message WorkflowExecutionConfig { + temporal.api.taskqueue.v1.TaskQueue task_queue = 1; + google.protobuf.Duration workflow_execution_timeout = 2 [(gogoproto.stdduration) = true]; + google.protobuf.Duration workflow_run_timeout = 3 [(gogoproto.stdduration) = true]; + google.protobuf.Duration 
default_workflow_task_timeout = 4 [(gogoproto.stdduration) = true]; +} + +message PendingActivityInfo { + string activity_id = 1; + temporal.api.common.v1.ActivityType activity_type = 2; + temporal.api.enums.v1.PendingActivityState state = 3; + temporal.api.common.v1.Payloads heartbeat_details = 4; + google.protobuf.Timestamp last_heartbeat_time = 5 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp last_started_time = 6 [(gogoproto.stdtime) = true]; + int32 attempt = 7; + int32 maximum_attempts = 8; + google.protobuf.Timestamp scheduled_time = 9 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp expiration_time = 10 [(gogoproto.stdtime) = true]; + temporal.api.failure.v1.Failure last_failure = 11; + string last_worker_identity = 12; +} + +message PendingChildExecutionInfo { + string workflow_id = 1; + string run_id = 2; + string workflow_type_name = 3; + int64 initiated_id = 4; + // Default: PARENT_CLOSE_POLICY_TERMINATE. + temporal.api.enums.v1.ParentClosePolicy parent_close_policy = 5; +} + +message PendingWorkflowTaskInfo { + temporal.api.enums.v1.PendingWorkflowTaskState state = 1; + google.protobuf.Timestamp scheduled_time = 2 [(gogoproto.stdtime) = true]; + // original_scheduled_time is the scheduled time of the first workflow task during workflow task heartbeat. + // Heartbeat workflow task is done by RespondWorkflowTaskComplete with ForceCreateNewWorkflowTask == true and no command + // In this case, OriginalScheduledTime won't change. Then when current time - original_scheduled_time exceeds + // some threshold, the workflow task will be forced timeout. + google.protobuf.Timestamp original_scheduled_time = 3 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp started_time = 4 [(gogoproto.stdtime) = true]; + int32 attempt = 5; +} + +message ResetPoints { + repeated ResetPointInfo points = 1; +} + +message ResetPointInfo { + // A worker binary version identifier, will be deprecated and superseded by a newer concept of + // build_id. 
+ string binary_checksum = 1; + // The first run ID in the execution chain that was touched by this worker build. + string run_id = 2; + // Event ID of the first WorkflowTaskCompleted event processed by this worker build. + int64 first_workflow_task_completed_id = 3; + google.protobuf.Timestamp create_time = 4 [(gogoproto.stdtime) = true]; + // (-- api-linter: core::0214::resource-expiry=disabled + // aip.dev/not-precedent: TTL is not defined for ResetPointInfo. --) + // The time that the run is deleted due to retention. + google.protobuf.Timestamp expire_time = 5 [(gogoproto.stdtime) = true]; + // false if the reset point has pending childWFs/reqCancels/signalExternals. + bool resettable = 6; +} + +// NewWorkflowExecutionInfo is a shared message that encapsulates all the +// required arguments to starting a workflow in different contexts. +message NewWorkflowExecutionInfo { + string workflow_id = 1; + temporal.api.common.v1.WorkflowType workflow_type = 2; + temporal.api.taskqueue.v1.TaskQueue task_queue = 3; + // Serialized arguments to the workflow. + temporal.api.common.v1.Payloads input = 4; + // Total workflow execution timeout including retries and continue as new. + google.protobuf.Duration workflow_execution_timeout = 5 [(gogoproto.stdduration) = true]; + // Timeout of a single workflow run. + google.protobuf.Duration workflow_run_timeout = 6 [(gogoproto.stdduration) = true]; + // Timeout of a single workflow task. + google.protobuf.Duration workflow_task_timeout = 7 [(gogoproto.stdduration) = true]; + // Default: WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE. + temporal.api.enums.v1.WorkflowIdReusePolicy workflow_id_reuse_policy = 8; + // The retry policy for the workflow. Will never exceed `workflow_execution_timeout`. 
+ temporal.api.common.v1.RetryPolicy retry_policy = 9; + // See https://docs.temporal.io/docs/content/what-is-a-temporal-cron-job/ + string cron_schedule = 10; + temporal.api.common.v1.Memo memo = 11; + temporal.api.common.v1.SearchAttributes search_attributes = 12; + temporal.api.common.v1.Header header = 13; +} + diff --git a/.temporal-api/temporal/api/workflowservice/v1/request_response.proto b/.temporal-api/temporal/api/workflowservice/v1/request_response.proto new file mode 100644 index 0000000..db4aa42 --- /dev/null +++ b/.temporal-api/temporal/api/workflowservice/v1/request_response.proto @@ -0,0 +1,1380 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +syntax = "proto3"; + +package temporal.api.workflowservice.v1; + +option go_package = "go.temporal.io/api/workflowservice/v1;workflowservice"; +option java_package = "io.temporal.api.workflowservice.v1"; +option java_multiple_files = true; +option java_outer_classname = "RequestResponseProto"; +option ruby_package = "Temporalio::Api::WorkflowService::V1"; +option csharp_namespace = "Temporalio.Api.WorkflowService.V1"; + +import "temporal/api/enums/v1/batch_operation.proto"; +import "temporal/api/enums/v1/workflow.proto"; +import "temporal/api/enums/v1/namespace.proto"; +import "temporal/api/enums/v1/failed_cause.proto"; +import "temporal/api/enums/v1/common.proto"; +import "temporal/api/enums/v1/query.proto"; +import "temporal/api/enums/v1/reset.proto"; +import "temporal/api/enums/v1/task_queue.proto"; +import "temporal/api/enums/v1/update.proto"; +import "temporal/api/common/v1/message.proto"; +import "temporal/api/history/v1/message.proto"; +import "temporal/api/workflow/v1/message.proto"; +import "temporal/api/command/v1/message.proto"; +import "temporal/api/failure/v1/message.proto"; +import "temporal/api/filter/v1/message.proto"; +import "temporal/api/protocol/v1/message.proto"; +import "temporal/api/namespace/v1/message.proto"; +import "temporal/api/query/v1/message.proto"; +import "temporal/api/replication/v1/message.proto"; +import "temporal/api/schedule/v1/message.proto"; +import "temporal/api/taskqueue/v1/message.proto"; +import "temporal/api/update/v1/message.proto"; +import "temporal/api/version/v1/message.proto"; +import "temporal/api/batch/v1/message.proto"; +import "temporal/api/sdk/v1/task_complete_metadata.proto"; + +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +import "dependencies/gogoproto/gogo.proto"; + +message RegisterNamespaceRequest { + string namespace = 1; + string description = 2; + string owner_email = 3; + google.protobuf.Duration workflow_execution_retention_period = 4 
[(gogoproto.stdduration) = true]; + repeated temporal.api.replication.v1.ClusterReplicationConfig clusters = 5; + string active_cluster_name = 6; + // A key-value map for any customized purpose. + map<string, string> data = 7; + string security_token = 8; + bool is_global_namespace = 9; + // If unspecified (ARCHIVAL_STATE_UNSPECIFIED) then default server configuration is used. + temporal.api.enums.v1.ArchivalState history_archival_state = 10; + string history_archival_uri = 11; + // If unspecified (ARCHIVAL_STATE_UNSPECIFIED) then default server configuration is used. + temporal.api.enums.v1.ArchivalState visibility_archival_state = 12; + string visibility_archival_uri = 13; +} + +message RegisterNamespaceResponse { +} + +message ListNamespacesRequest { + int32 page_size = 1; + bytes next_page_token = 2; + temporal.api.namespace.v1.NamespaceFilter namespace_filter = 3; +} + +message ListNamespacesResponse { + repeated DescribeNamespaceResponse namespaces = 1; + bytes next_page_token = 2; +} + +message DescribeNamespaceRequest { + string namespace = 1; + string id = 2; +} + +message DescribeNamespaceResponse { + temporal.api.namespace.v1.NamespaceInfo namespace_info = 1; + temporal.api.namespace.v1.NamespaceConfig config = 2; + temporal.api.replication.v1.NamespaceReplicationConfig replication_config = 3; + int64 failover_version = 4; + bool is_global_namespace = 5; + // Contains the historical state of failover_versions for the cluster, truncated to contain only the last N + // states to ensure that the list does not grow unbounded. + repeated temporal.api.replication.v1.FailoverStatus failover_history = 6; +} + +// (-- api-linter: core::0134::request-mask-required=disabled +// aip.dev/not-precedent: UpdateNamespace RPC doesn't follow Google API format. --) +// (-- api-linter: core::0134::request-resource-required=disabled +// aip.dev/not-precedent: UpdateNamespace RPC doesn't follow Google API format. 
--) +message UpdateNamespaceRequest { + string namespace = 1; + temporal.api.namespace.v1.UpdateNamespaceInfo update_info = 2; + temporal.api.namespace.v1.NamespaceConfig config = 3; + temporal.api.replication.v1.NamespaceReplicationConfig replication_config = 4; + string security_token = 5; + string delete_bad_binary = 6; + // promote local namespace to global namespace. Ignored if namespace is already global namespace. + bool promote_namespace = 7; +} + +message UpdateNamespaceResponse { + temporal.api.namespace.v1.NamespaceInfo namespace_info = 1; + temporal.api.namespace.v1.NamespaceConfig config = 2; + temporal.api.replication.v1.NamespaceReplicationConfig replication_config = 3; + int64 failover_version = 4; + bool is_global_namespace = 5; +} + +// Deprecated. +message DeprecateNamespaceRequest { + string namespace = 1; + string security_token = 2; +} + +// Deprecated. +message DeprecateNamespaceResponse { +} + +message StartWorkflowExecutionRequest { + string namespace = 1; + string workflow_id = 2; + temporal.api.common.v1.WorkflowType workflow_type = 3; + temporal.api.taskqueue.v1.TaskQueue task_queue = 4; + // Serialized arguments to the workflow. These are passed as arguments to the workflow function. + temporal.api.common.v1.Payloads input = 5; + // Total workflow execution timeout including retries and continue as new. + google.protobuf.Duration workflow_execution_timeout = 6 [(gogoproto.stdduration) = true]; + // Timeout of a single workflow run. + google.protobuf.Duration workflow_run_timeout = 7 [(gogoproto.stdduration) = true]; + // Timeout of a single workflow task. + google.protobuf.Duration workflow_task_timeout = 8 [(gogoproto.stdduration) = true]; + // The identity of the client who initiated this request + string identity = 9; + // A unique identifier for this start request. Typically UUIDv4. + string request_id = 10; + // Default: WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE. 
+ temporal.api.enums.v1.WorkflowIdReusePolicy workflow_id_reuse_policy = 11; + // The retry policy for the workflow. Will never exceed `workflow_execution_timeout`. + temporal.api.common.v1.RetryPolicy retry_policy = 12; + // See https://docs.temporal.io/docs/content/what-is-a-temporal-cron-job/ + string cron_schedule = 13; + temporal.api.common.v1.Memo memo = 14; + temporal.api.common.v1.SearchAttributes search_attributes = 15; + temporal.api.common.v1.Header header = 16; + // Request to get the first workflow task inline in the response bypassing matching service and worker polling. + // If set to `true` the caller is expected to have a worker available and capable of processing the task. + // The returned task will be marked as started and is expected to be completed by the specified + // `workflow_task_timeout`. + bool request_eager_execution = 17; + // These values will be available as ContinuedFailure and LastCompletionResult in the + // WorkflowExecutionStarted event and through SDKs. They are currently only used by the + // server itself (for the schedules feature) and are not intended to be exposed in + // StartWorkflowExecution. + temporal.api.failure.v1.Failure continued_failure = 18; + temporal.api.common.v1.Payloads last_completion_result = 19; + // Time to wait before dispatching the first workflow task. Cannot be used with `cron_schedule`. + // If the workflow gets a signal before the delay, a workflow task will be dispatched and the rest + // of the delay will be ignored. + google.protobuf.Duration workflow_start_delay = 20 [(gogoproto.stdduration) = true]; +} + +message StartWorkflowExecutionResponse { + string run_id = 1; + // When `request_eager_execution` is set on the `StartWorkflowExecutionRequest`, the server - if supported - will + // return the first workflow task to be eagerly executed. + // The caller is expected to have a worker available to process the task. 
+ PollWorkflowTaskQueueResponse eager_workflow_task = 2; +} + +message GetWorkflowExecutionHistoryRequest { + string namespace = 1; + temporal.api.common.v1.WorkflowExecution execution = 2; + int32 maximum_page_size = 3; + // If a `GetWorkflowExecutionHistoryResponse` or a `PollWorkflowTaskQueueResponse` had one of + // these, it should be passed here to fetch the next page. + bytes next_page_token = 4; + // If set to true, the RPC call will not resolve until there is a new event which matches + // the `history_event_filter_type`, or a timeout is hit. + bool wait_new_event = 5; + // Filter returned events such that they match the specified filter type. + // Default: HISTORY_EVENT_FILTER_TYPE_ALL_EVENT. + temporal.api.enums.v1.HistoryEventFilterType history_event_filter_type = 6; + bool skip_archival = 7; +} + +message GetWorkflowExecutionHistoryResponse { + temporal.api.history.v1.History history = 1; + // Raw history is an alternate representation of history that may be returned if configured on + // the frontend. This is not supported by all SDKs. Either this or `history` will be set. 
+ repeated temporal.api.common.v1.DataBlob raw_history = 2; + // Will be set if there are more history events than were included in this response + bytes next_page_token = 3; + bool archived = 4; +} + +message GetWorkflowExecutionHistoryReverseRequest { + string namespace = 1; + temporal.api.common.v1.WorkflowExecution execution = 2; + int32 maximum_page_size = 3; + bytes next_page_token = 4; +} + +message GetWorkflowExecutionHistoryReverseResponse { + temporal.api.history.v1.History history = 1; + // Will be set if there are more history events than were included in this response + bytes next_page_token = 3; +} + +message PollWorkflowTaskQueueRequest { + string namespace = 1; + temporal.api.taskqueue.v1.TaskQueue task_queue = 2; + // The identity of the worker/client who is polling this task queue + string identity = 3; + // DEPRECATED since 1.21 - use `worker_version_capabilities` instead. + // Each worker process should provide an ID unique to the specific set of code it is running + // "checksum" in this field name isn't very accurate, it should be thought of as an id. + string binary_checksum = 4; + // Information about this worker's build identifier and if it is choosing to use the versioning + // feature. See the `WorkerVersionCapabilities` docstring for more. + temporal.api.common.v1.WorkerVersionCapabilities worker_version_capabilities = 5; +} + +message PollWorkflowTaskQueueResponse { + // A unique identifier for this task + bytes task_token = 1; + temporal.api.common.v1.WorkflowExecution workflow_execution = 2; + temporal.api.common.v1.WorkflowType workflow_type = 3; + // The last workflow task started event which was processed by some worker for this execution. + // Will be zero if no task has ever started. + int64 previous_started_event_id = 4; + // The id of the most recent workflow task started event, which will have been generated as a + // result of this poll request being served. 
+ int64 started_event_id = 5; + // Starting at 1, the number of attempts to complete this task by any worker. + int32 attempt = 6; + // A hint that there are more tasks already present in this task queue. Can be used to + // prioritize draining a sticky queue before polling from a normal queue. + int64 backlog_count_hint = 7; + // The history for this workflow, which will either be complete or partial. Partial histories + // are sent to workers who have signaled that they are using a sticky queue when completing + // a workflow task. + temporal.api.history.v1.History history = 8; + // Will be set if there are more history events than were included in this response. Such events + // should be fetched via `GetWorkflowExecutionHistory`. + bytes next_page_token = 9; + // Legacy queries appear in this field. The query must be responded to via + // `RespondQueryTaskCompleted`. If the workflow is already closed (queries are permitted on + // closed workflows) then the `history` field will be populated with the entire history. It + // may also be populated if this task originates on a non-sticky queue. + temporal.api.query.v1.WorkflowQuery query = 10; + // The task queue this task originated from, which will always be the original non-sticky name + // for the queue, even if this response came from polling a sticky queue. + temporal.api.taskqueue.v1.TaskQueue workflow_execution_task_queue = 11; + // When this task was scheduled by the server + google.protobuf.Timestamp scheduled_time = 12 [(gogoproto.stdtime) = true]; + // When the current workflow task started event was generated, meaning the current attempt. + google.protobuf.Timestamp started_time = 13 [(gogoproto.stdtime) = true]; + // Queries that should be executed after applying the history in this task. 
Responses should be + // attached to `RespondWorkflowTaskCompletedRequest::query_results` + map<string, temporal.api.query.v1.WorkflowQuery> queries = 14; + // Protocol messages piggybacking on a WFT as a transport + repeated temporal.api.protocol.v1.Message messages = 15; +} + +message RespondWorkflowTaskCompletedRequest { + // The task token as received in `PollWorkflowTaskQueueResponse` + bytes task_token = 1; + // A list of commands generated when driving the workflow code in response to the new task + repeated temporal.api.command.v1.Command commands = 2; + // The identity of the worker/client + string identity = 3; + // May be set by workers to indicate that the worker desires future tasks to be provided with + // incremental history on a sticky queue. + temporal.api.taskqueue.v1.StickyExecutionAttributes sticky_attributes = 4; + // If set, the worker wishes to immediately receive the next workflow task as a response to + // this completion. This can save on polling round-trips. + bool return_new_workflow_task = 5; + // Can be used to *force* creation of a new workflow task, even if no commands have resolved or + // one would not otherwise have been generated. This is used when the worker knows it is doing + // something useful, but cannot complete it within the workflow task timeout. Local activities + // which run for longer than the task timeout being the prime example. + bool force_create_new_workflow_task = 6; + // DEPRECATED since 1.21 - use `worker_version_stamp` instead. + // Worker process' unique binary id + string binary_checksum = 7; + // Responses to the `queries` field in the task being responded to + map<string, temporal.api.query.v1.WorkflowQueryResult> query_results = 8; + string namespace = 9; + // Version info of the worker who processed this task. This message's `build_id` field should + // always be set by SDKs. Workers opting into versioning will also set the `use_versioning` + // field to true. See message docstrings for more. 
+ temporal.api.common.v1.WorkerVersionStamp worker_version_stamp = 10; + // Protocol messages piggybacking on a WFT as a transport + repeated temporal.api.protocol.v1.Message messages = 11; + // Data the SDK wishes to record for itself, but server need not interpret, and does not + // directly impact workflow state. + temporal.api.sdk.v1.WorkflowTaskCompletedMetadata sdk_metadata = 12; + // Local usage data collected for metering + temporal.api.common.v1.MeteringMetadata metering_metadata = 13; +} + +message RespondWorkflowTaskCompletedResponse { + // See `RespondWorkflowTaskCompletedRequest::return_new_workflow_task` + PollWorkflowTaskQueueResponse workflow_task = 1; + // See `ScheduleActivityTaskCommandAttributes::request_eager_execution` + repeated PollActivityTaskQueueResponse activity_tasks = 2; + + int64 reset_history_event_id = 3; +} + +message RespondWorkflowTaskFailedRequest { + // The task token as received in `PollWorkflowTaskQueueResponse` + bytes task_token = 1; + // Why did the task fail? It's important to note that many of the variants in this enum cannot + // apply to worker responses. See the type's doc for more. + temporal.api.enums.v1.WorkflowTaskFailedCause cause = 2; + // Failure details + temporal.api.failure.v1.Failure failure = 3; + // The identity of the worker/client + string identity = 4; + // DEPRECATED since 1.21 - use `worker_version_stamp` instead. + // Worker process' unique binary id + string binary_checksum = 5; + string namespace = 6; + // Protocol messages piggybacking on a WFT as a transport + repeated temporal.api.protocol.v1.Message messages = 7; + // Version info of the worker who processed this task. This message's `build_id` field should + // always be set by SDKs. Workers opting into versioning will also set the `use_versioning` + // field to true. See message docstrings for more. 
+ temporal.api.common.v1.WorkerVersionStamp worker_version = 8; +} + +message RespondWorkflowTaskFailedResponse { +} + +message PollActivityTaskQueueRequest { + string namespace = 1; + temporal.api.taskqueue.v1.TaskQueue task_queue = 2; + // The identity of the worker/client + string identity = 3; + temporal.api.taskqueue.v1.TaskQueueMetadata task_queue_metadata = 4; + // Information about this worker's build identifier and if it is choosing to use the versioning + // feature. See the `WorkerVersionCapabilities` docstring for more. + temporal.api.common.v1.WorkerVersionCapabilities worker_version_capabilities = 5; +} + +message PollActivityTaskQueueResponse { + // A unique identifier for this task + bytes task_token = 1; + // The namespace the workflow which requested this activity lives in + string workflow_namespace = 2; + // Type of the requesting workflow + temporal.api.common.v1.WorkflowType workflow_type = 3; + // Execution info of the requesting workflow + temporal.api.common.v1.WorkflowExecution workflow_execution = 4; + temporal.api.common.v1.ActivityType activity_type = 5; + // The autogenerated or user specified identifier of this activity. Can be used to complete the + // activity via `RespondActivityTaskCompletedById`. May be re-used as long as the last usage + // has resolved, but unique IDs for every activity invocation is a good idea. + string activity_id = 6; + // Headers specified by the scheduling workflow. Commonly used to propagate contextual info + // from the workflow to its activities. For example, tracing contexts. + temporal.api.common.v1.Header header = 7; + // Arguments to the activity invocation + temporal.api.common.v1.Payloads input = 8; + // Details of the last heartbeat that was recorded for this activity as of the time this task + // was delivered. 
+ temporal.api.common.v1.Payloads heartbeat_details = 9; + // When was this task first scheduled + google.protobuf.Timestamp scheduled_time = 10 [(gogoproto.stdtime) = true]; + // When was this task attempt scheduled + google.protobuf.Timestamp current_attempt_scheduled_time = 11 [(gogoproto.stdtime) = true]; + // When was this task started (this attempt) + google.protobuf.Timestamp started_time = 12 [(gogoproto.stdtime) = true]; + // Starting at 1, the number of attempts to perform this activity + int32 attempt = 13; + // First scheduled -> final result reported timeout + // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: "to" is used to indicate interval. --) + google.protobuf.Duration schedule_to_close_timeout = 14 [(gogoproto.stdduration) = true]; + // Current attempt start -> final result reported timeout + // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: "to" is used to indicate interval. --) + google.protobuf.Duration start_to_close_timeout = 15 [(gogoproto.stdduration) = true]; + // Window within which the activity must report a heartbeat, or be timed out. + google.protobuf.Duration heartbeat_timeout = 16 [(gogoproto.stdduration) = true]; + // This is the retry policy the service uses which may be different from the one provided + // (or not) during activity scheduling. The service can override the provided one if some + // values are not specified or exceed configured system limits. 
+ temporal.api.common.v1.RetryPolicy retry_policy = 17; +} + +message RecordActivityTaskHeartbeatRequest { + // The task token as received in `PollActivityTaskQueueResponse` + bytes task_token = 1; + // Arbitrary data, of which the most recent call is kept, to store for this activity + temporal.api.common.v1.Payloads details = 2; + // The identity of the worker/client + string identity = 3; + string namespace = 4; +} + +message RecordActivityTaskHeartbeatResponse { + // Will be set to true if the activity has been asked to cancel itself. The SDK should then + // notify the activity of cancellation if it is still running. + bool cancel_requested = 1; +} + +message RecordActivityTaskHeartbeatByIdRequest { + // Namespace of the workflow which scheduled this activity + string namespace = 1; + // Id of the workflow which scheduled this activity + string workflow_id = 2; + // Run Id of the workflow which scheduled this activity + string run_id = 3; + // Id of the activity we're heartbeating + string activity_id = 4; + // Arbitrary data, of which the most recent call is kept, to store for this activity + temporal.api.common.v1.Payloads details = 5; + // The identity of the worker/client + string identity = 6; +} + +message RecordActivityTaskHeartbeatByIdResponse { + // Will be set to true if the activity has been asked to cancel itself. The SDK should then + // notify the activity of cancellation if it is still running. + bool cancel_requested = 1; +} + +message RespondActivityTaskCompletedRequest { + // The task token as received in `PollActivityTaskQueueResponse` + bytes task_token = 1; + // The result of successfully executing the activity + temporal.api.common.v1.Payloads result = 2; + // The identity of the worker/client + string identity = 3; + string namespace = 4; + // Version info of the worker who processed this task. This message's `build_id` field should + // always be set by SDKs. 
Workers opting into versioning will also set the `use_versioning` + // field to true. See message docstrings for more. + temporal.api.common.v1.WorkerVersionStamp worker_version = 5; +} + +message RespondActivityTaskCompletedResponse { +} + +message RespondActivityTaskCompletedByIdRequest { + // Namespace of the workflow which scheduled this activity + string namespace = 1; + // Id of the workflow which scheduled this activity + string workflow_id = 2; + // Run Id of the workflow which scheduled this activity + string run_id = 3; + // Id of the activity to complete + string activity_id = 4; + // The serialized result of activity execution + temporal.api.common.v1.Payloads result = 5; + // The identity of the worker/client + string identity = 6; +} + +message RespondActivityTaskCompletedByIdResponse { +} + +message RespondActivityTaskFailedRequest { + // The task token as received in `PollActivityTaskQueueResponse` + bytes task_token = 1; + // Detailed failure information + temporal.api.failure.v1.Failure failure = 2; + // The identity of the worker/client + string identity = 3; + string namespace = 4; + // Additional details to be stored as last activity heartbeat + temporal.api.common.v1.Payloads last_heartbeat_details = 5; + // Version info of the worker who processed this task. This message's `build_id` field should + // always be set by SDKs. Workers opting into versioning will also set the `use_versioning` + // field to true. See message docstrings for more. 
+ temporal.api.common.v1.WorkerVersionStamp worker_version = 6; +} + +message RespondActivityTaskFailedResponse { + // Server validation failures could include + // last_heartbeat_details payload is too large, request failure is too large + repeated temporal.api.failure.v1.Failure failures = 1; +} + +message RespondActivityTaskFailedByIdRequest { + // Namespace of the workflow which scheduled this activity + string namespace = 1; + // Id of the workflow which scheduled this activity + string workflow_id = 2; + // Run Id of the workflow which scheduled this activity + string run_id = 3; + // Id of the activity to fail + string activity_id = 4; + // Detailed failure information + temporal.api.failure.v1.Failure failure = 5; + // The identity of the worker/client + string identity = 6; + // Additional details to be stored as last activity heartbeat + temporal.api.common.v1.Payloads last_heartbeat_details = 7; +} + +message RespondActivityTaskFailedByIdResponse { + // Server validation failures could include + // last_heartbeat_details payload is too large, request failure is too large + repeated temporal.api.failure.v1.Failure failures = 1; +} + +message RespondActivityTaskCanceledRequest { + // The task token as received in `PollActivityTaskQueueResponse` + bytes task_token = 1; + // Serialized additional information to attach to the cancellation + temporal.api.common.v1.Payloads details = 2; + // The identity of the worker/client + string identity = 3; + string namespace = 4; + // Version info of the worker who processed this task. This message's `build_id` field should + // always be set by SDKs. Workers opting into versioning will also set the `use_versioning` + // field to true. See message docstrings for more. 
+ temporal.api.common.v1.WorkerVersionStamp worker_version = 5; +} + +message RespondActivityTaskCanceledResponse { +} + +message RespondActivityTaskCanceledByIdRequest { + // Namespace of the workflow which scheduled this activity + string namespace = 1; + // Id of the workflow which scheduled this activity + string workflow_id = 2; + // Run Id of the workflow which scheduled this activity + string run_id = 3; + // Id of the activity to confirm is cancelled + string activity_id = 4; + // Serialized additional information to attach to the cancellation + temporal.api.common.v1.Payloads details = 5; + // The identity of the worker/client + string identity = 6; +} + +message RespondActivityTaskCanceledByIdResponse { +} + +message RequestCancelWorkflowExecutionRequest { + string namespace = 1; + temporal.api.common.v1.WorkflowExecution workflow_execution = 2; + // The identity of the worker/client + string identity = 3; + // Used to de-dupe cancellation requests + string request_id = 4; + // If set, this call will error if the most recent (if no run id is set on + // `workflow_execution`), or specified (if it is) workflow execution is not part of the same + // execution chain as this id. + string first_execution_run_id = 5; + // Reason for requesting the cancellation + string reason = 6; +} + +message RequestCancelWorkflowExecutionResponse { +} + +message SignalWorkflowExecutionRequest { + string namespace = 1; + temporal.api.common.v1.WorkflowExecution workflow_execution = 2; + // The workflow author-defined name of the signal to send to the workflow + string signal_name = 3; + // Serialized value(s) to provide with the signal + temporal.api.common.v1.Payloads input = 4; + // The identity of the worker/client + string identity = 5; + // Used to de-dupe sent signals + string request_id = 6; + // Deprecated + string control = 7; + // Headers that are passed with the signal to the processing workflow. + // These can include things like auth or tracing tokens. 
+ temporal.api.common.v1.Header header = 8; + // Indicates that a new workflow task should not be generated when this signal is received. + bool skip_generate_workflow_task = 9; +} + +message SignalWorkflowExecutionResponse { +} + +message SignalWithStartWorkflowExecutionRequest { + string namespace = 1; + string workflow_id = 2; + temporal.api.common.v1.WorkflowType workflow_type = 3; + // The task queue to start this workflow on, if it will be started + temporal.api.taskqueue.v1.TaskQueue task_queue = 4; + // Serialized arguments to the workflow. These are passed as arguments to the workflow function. + temporal.api.common.v1.Payloads input = 5; + // Total workflow execution timeout including retries and continue as new + google.protobuf.Duration workflow_execution_timeout = 6 [(gogoproto.stdduration) = true]; + // Timeout of a single workflow run + google.protobuf.Duration workflow_run_timeout = 7 [(gogoproto.stdduration) = true]; + // Timeout of a single workflow task + google.protobuf.Duration workflow_task_timeout = 8 [(gogoproto.stdduration) = true]; + // The identity of the worker/client + string identity = 9; + // Used to de-dupe signal w/ start requests + string request_id = 10; + // Defines whether to allow re-using the workflow id from a previously *closed* workflow. + // Default: WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE. + temporal.api.enums.v1.WorkflowIdReusePolicy workflow_id_reuse_policy = 11; + // The workflow author-defined name of the signal to send to the workflow + string signal_name = 12; + // Serialized value(s) to provide with the signal + temporal.api.common.v1.Payloads signal_input = 13; + // Deprecated + string control = 14; + // Retry policy for the workflow + temporal.api.common.v1.RetryPolicy retry_policy = 15; + // See https://docs.temporal.io/docs/content/what-is-a-temporal-cron-job/ + string cron_schedule = 16; + temporal.api.common.v1.Memo memo = 17; + temporal.api.common.v1.SearchAttributes search_attributes = 18; + temporal.api.common.v1.Header header = 19; + // Time to wait before dispatching the first workflow task. 
Cannot be used with `cron_schedule`. + // Note that the signal will be delivered with the first workflow task. If the workflow gets + // another SignalWithStartWorkflow before the delay and `skip_generate_workflow_task` is false + // or not set, a workflow task will be dispatched immediately and the rest of the delay period + // will be ignored, even if that request also had a delay. Signal via SignalWorkflowExecution + // will not unblock the workflow. + google.protobuf.Duration workflow_start_delay = 20 [(gogoproto.stdduration) = true]; + // Indicates that a new workflow task should not be generated when this signal is received. + bool skip_generate_workflow_task = 21; +} + +message SignalWithStartWorkflowExecutionResponse { + string run_id = 1; +} + +message ResetWorkflowExecutionRequest { + string namespace = 1; + temporal.api.common.v1.WorkflowExecution workflow_execution = 2; + string reason = 3; + // The id of a `WORKFLOW_TASK_COMPLETED`,`WORKFLOW_TASK_TIMED_OUT`, `WORKFLOW_TASK_FAILED`, or + // `WORKFLOW_TASK_STARTED` event to reset to. + int64 workflow_task_finish_event_id = 4; + // Used to de-dupe reset requests + string request_id = 5; + // Reset reapply (replay) options. + temporal.api.enums.v1.ResetReapplyType reset_reapply_type = 6; +} + +message ResetWorkflowExecutionResponse { + string run_id = 1; +} + +message TerminateWorkflowExecutionRequest { + string namespace = 1; + temporal.api.common.v1.WorkflowExecution workflow_execution = 2; + string reason = 3; + // Serialized additional information to attach to the termination event + temporal.api.common.v1.Payloads details = 4; + // The identity of the worker/client + string identity = 5; + // If set, this call will error if the most recent (if no run id is set on + // `workflow_execution`), or specified (if it is) workflow execution is not part of the same + // execution chain as this id. 
+ string first_execution_run_id = 6; +} + +message TerminateWorkflowExecutionResponse { +} + +// (-- api-linter: core::0135::request-unknown-fields=disabled +// aip.dev/not-precedent: DeleteNamespace RPC doesn't follow Google API format. --) +// (-- api-linter: core::0135::request-name-required=disabled +// aip.dev/not-precedent: DeleteNamespace RPC doesn't follow Google API format. --) +message DeleteWorkflowExecutionRequest { + string namespace = 1; + // Workflow Execution to delete. If run_id is not specified, the latest one is used. + temporal.api.common.v1.WorkflowExecution workflow_execution = 2; +} + +message DeleteWorkflowExecutionResponse { +} + +message ListOpenWorkflowExecutionsRequest { + string namespace = 1; + int32 maximum_page_size = 2; + bytes next_page_token = 3; + temporal.api.filter.v1.StartTimeFilter start_time_filter = 4; + oneof filters { + temporal.api.filter.v1.WorkflowExecutionFilter execution_filter = 5; + temporal.api.filter.v1.WorkflowTypeFilter type_filter = 6; + } +} + +message ListOpenWorkflowExecutionsResponse { + repeated temporal.api.workflow.v1.WorkflowExecutionInfo executions = 1; + bytes next_page_token = 2; +} + +message ListClosedWorkflowExecutionsRequest { + string namespace = 1; + int32 maximum_page_size = 2; + bytes next_page_token = 3; + temporal.api.filter.v1.StartTimeFilter start_time_filter = 4; + oneof filters { + temporal.api.filter.v1.WorkflowExecutionFilter execution_filter = 5; + temporal.api.filter.v1.WorkflowTypeFilter type_filter = 6; + temporal.api.filter.v1.StatusFilter status_filter = 7; + } +} + +message ListClosedWorkflowExecutionsResponse { + repeated temporal.api.workflow.v1.WorkflowExecutionInfo executions = 1; + bytes next_page_token = 2; +} + +message ListWorkflowExecutionsRequest { + string namespace = 1; + int32 page_size = 2; + bytes next_page_token = 3; + string query = 4; +} + +message ListWorkflowExecutionsResponse { + repeated temporal.api.workflow.v1.WorkflowExecutionInfo executions = 1; + 
bytes next_page_token = 2; +} + +message ListArchivedWorkflowExecutionsRequest { + string namespace = 1; + int32 page_size = 2; + bytes next_page_token = 3; + string query = 4; +} + +message ListArchivedWorkflowExecutionsResponse { + repeated temporal.api.workflow.v1.WorkflowExecutionInfo executions = 1; + bytes next_page_token = 2; +} + +message ScanWorkflowExecutionsRequest { + string namespace = 1; + int32 page_size = 2; + bytes next_page_token = 3; + string query = 4; +} + +message ScanWorkflowExecutionsResponse { + repeated temporal.api.workflow.v1.WorkflowExecutionInfo executions = 1; + bytes next_page_token = 2; +} + +message CountWorkflowExecutionsRequest { + string namespace = 1; + string query = 2; +} + +message CountWorkflowExecutionsResponse { + // If `query` is not grouping by any field, the count is an approximate number + // of workflows that matches the query. + // If `query` is grouping by a field, the count is simply the sum of the counts + // of the groups returned in the response. This number can be smaller than the + // total number of workflows matching the query. + int64 count = 1; + + // `groups` contains the groups if the request is grouping by a field. + // The list might not be complete, and the counts of each group is approximate. 
+ repeated AggregationGroup groups = 2; + + message AggregationGroup { + repeated temporal.api.common.v1.Payload group_values = 1; + int64 count = 2; + } +} + +message GetSearchAttributesRequest { +} + +message GetSearchAttributesResponse { + // Mapping from search attribute name to the type of its value. + map<string, temporal.api.enums.v1.IndexedValueType> keys = 1; +} + +message RespondQueryTaskCompletedRequest { + bytes task_token = 1; + temporal.api.enums.v1.QueryResultType completed_type = 2; + temporal.api.common.v1.Payloads query_result = 3; + string error_message = 4; + reserved 5; + string namespace = 6; +} + +message RespondQueryTaskCompletedResponse { +} + +message ResetStickyTaskQueueRequest { + string namespace = 1; + temporal.api.common.v1.WorkflowExecution execution = 2; +} + +message ResetStickyTaskQueueResponse { +} + +message QueryWorkflowRequest { + string namespace = 1; + temporal.api.common.v1.WorkflowExecution execution = 2; + temporal.api.query.v1.WorkflowQuery query = 3; + // QueryRejectCondition can be used to reject the query if workflow state does not satisfy condition. + // Default: QUERY_REJECT_CONDITION_NONE. 
+ temporal.api.enums.v1.QueryRejectCondition query_reject_condition = 4; +} + +message QueryWorkflowResponse { + temporal.api.common.v1.Payloads query_result = 1; + temporal.api.query.v1.QueryRejected query_rejected = 2; +} + +message DescribeWorkflowExecutionRequest { + string namespace = 1; + temporal.api.common.v1.WorkflowExecution execution = 2; +} + +message DescribeWorkflowExecutionResponse { + temporal.api.workflow.v1.WorkflowExecutionConfig execution_config = 1; + temporal.api.workflow.v1.WorkflowExecutionInfo workflow_execution_info = 2; + repeated temporal.api.workflow.v1.PendingActivityInfo pending_activities = 3; + repeated temporal.api.workflow.v1.PendingChildExecutionInfo pending_children = 4; + temporal.api.workflow.v1.PendingWorkflowTaskInfo pending_workflow_task = 5; +} + +message DescribeTaskQueueRequest { + string namespace = 1; + temporal.api.taskqueue.v1.TaskQueue task_queue = 2; + // If unspecified (TASK_QUEUE_TYPE_UNSPECIFIED), then default value (TASK_QUEUE_TYPE_WORKFLOW) will be used. + temporal.api.enums.v1.TaskQueueType task_queue_type = 3; + bool include_task_queue_status = 4; +} + +message DescribeTaskQueueResponse { + repeated temporal.api.taskqueue.v1.PollerInfo pollers = 1; + temporal.api.taskqueue.v1.TaskQueueStatus task_queue_status = 2; +} + +message GetClusterInfoRequest { +} + +// GetClusterInfoResponse contains information about Temporal cluster. +message GetClusterInfoResponse { + // Key is client name i.e "temporal-go", "temporal-java", or "temporal-cli". + // Value is ranges of supported versions of this client i.e ">1.1.1 <=1.4.0 || ^5.0.0". + map<string, string> supported_clients = 1; + string server_version = 2; + string cluster_id = 3; + temporal.api.version.v1.VersionInfo version_info = 4; + string cluster_name = 5; + int32 history_shard_count = 6; + string persistence_store = 7; + string visibility_store = 8; +} + +message GetSystemInfoRequest { +} + +message GetSystemInfoResponse { + // Version of the server. 
+ string server_version = 1; + + // All capabilities the system supports. + Capabilities capabilities = 2; + + // System capability details. + message Capabilities { + // True if signal and query headers are supported. + bool signal_and_query_header = 1; + + // True if internal errors are differentiated from other types of errors for purposes of + // retrying non-internal errors. + // + // When unset/false, clients retry all failures. When true, clients should only retry + // non-internal errors. + bool internal_error_differentiation = 2; + + // True if RespondActivityTaskFailed API supports including heartbeat details + bool activity_failure_include_heartbeat = 3; + + // Supports scheduled workflow features. + bool supports_schedules = 4; + + // True if server uses protos that include temporal.api.failure.v1.Failure.encoded_attributes + bool encoded_failure_attributes = 5; + + // True if server supports dispatching Workflow and Activity tasks based on a worker's build_id + // (see: + // https://github.com/temporalio/proposals/blob/a123af3b559f43db16ea6dd31870bfb754c4dc5e/versioning/worker-versions.md) + bool build_id_based_versioning = 6; + + // True if server supports upserting workflow memo + bool upsert_memo = 7; + + // True if server supports eager workflow task dispatching for the StartWorkflowExecution API + bool eager_workflow_start = 8; + + // True if the server knows about the sdk metadata field on WFT completions and will record + // it in history + bool sdk_metadata = 9; + + // True if the server supports count group by execution status + // (-- api-linter: core::0140::prepositions=disabled --) + bool count_group_by_execution_status = 10; + } +} + +message ListTaskQueuePartitionsRequest { + string namespace = 1; + temporal.api.taskqueue.v1.TaskQueue task_queue = 2; +} + +message ListTaskQueuePartitionsResponse { + repeated temporal.api.taskqueue.v1.TaskQueuePartitionMetadata activity_task_queue_partitions = 1; + repeated 
temporal.api.taskqueue.v1.TaskQueuePartitionMetadata workflow_task_queue_partitions = 2; +} + +// (-- api-linter: core::0133::request-parent-required=disabled +// aip.dev/not-precedent: CreateSchedule doesn't follow Google API format --) +// (-- api-linter: core::0133::request-unknown-fields=disabled +// aip.dev/not-precedent: CreateSchedule doesn't follow Google API format --) +// (-- api-linter: core::0133::request-resource-behavior=disabled +// aip.dev/not-precedent: field_behavior annotation not available in our gogo fork --) +// (-- api-linter: core::0203::optional=disabled +// aip.dev/not-precedent: field_behavior annotation not available in our gogo fork --) +message CreateScheduleRequest { + // The namespace the schedule should be created in. + string namespace = 1; + // The id of the new schedule. + string schedule_id = 2; + // The schedule spec, policies, action, and initial state. + temporal.api.schedule.v1.Schedule schedule = 3; + // Optional initial patch (e.g. to run the action once immediately). + temporal.api.schedule.v1.SchedulePatch initial_patch = 4; + // The identity of the client who initiated this request. + string identity = 5; + // A unique identifier for this create request for idempotence. Typically UUIDv4. + string request_id = 6; + // Memo and search attributes to attach to the schedule itself. + temporal.api.common.v1.Memo memo = 7; + temporal.api.common.v1.SearchAttributes search_attributes = 8; +} + +message CreateScheduleResponse { + bytes conflict_token = 1; +} + +message DescribeScheduleRequest { + // The namespace of the schedule to describe. + string namespace = 1; + // The id of the schedule to describe. + string schedule_id = 2; +} + +message DescribeScheduleResponse { + // The complete current schedule details. This may not match the schedule as + // created because: + // - some types of schedule specs may get compiled into others (e.g. 
+ // CronString into StructuredCalendarSpec) + // - some unspecified fields may be replaced by defaults + // - some fields in the state are modified automatically + // - the schedule may have been modified by UpdateSchedule or PatchSchedule + temporal.api.schedule.v1.Schedule schedule = 1; + // Extra schedule state info. + temporal.api.schedule.v1.ScheduleInfo info = 2; + // The memo and search attributes that the schedule was created with. + temporal.api.common.v1.Memo memo = 3; + temporal.api.common.v1.SearchAttributes search_attributes = 4; + + // This value can be passed back to UpdateSchedule to ensure that the + // schedule was not modified between a Describe and an Update, which could + // lead to lost updates and other confusion. + bytes conflict_token = 5; +} + +// (-- api-linter: core::0134::request-mask-required=disabled +// aip.dev/not-precedent: UpdateSchedule doesn't follow Google API format --) +message UpdateScheduleRequest { + // The namespace of the schedule to update. + string namespace = 1; + // The id of the schedule to update. + string schedule_id = 2; + // The new schedule. The four main fields of the schedule (spec, action, + // policies, state) are replaced completely by the values in this message. + temporal.api.schedule.v1.Schedule schedule = 3; + // This can be the value of conflict_token from a DescribeScheduleResponse, + // which will cause this request to fail if the schedule has been modified + // between the Describe and this Update. + // If missing, the schedule will be updated unconditionally. + bytes conflict_token = 4; + // The identity of the client who initiated this request. + string identity = 5; + // A unique identifier for this update request for idempotence. Typically UUIDv4. + string request_id = 6; +} + +message UpdateScheduleResponse { +} + +message PatchScheduleRequest { + // The namespace of the schedule to patch. + string namespace = 1; + // The id of the schedule to patch. 
+ string schedule_id = 2; + temporal.api.schedule.v1.SchedulePatch patch = 3; + // The identity of the client who initiated this request. + string identity = 4; + // A unique identifier for this update request for idempotence. Typically UUIDv4. + string request_id = 5; +} + +message PatchScheduleResponse { +} + +message ListScheduleMatchingTimesRequest { + // The namespace of the schedule to query. + string namespace = 1; + // The id of the schedule to query. + string schedule_id = 2; + // Time range to query. + google.protobuf.Timestamp start_time = 3 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp end_time = 4 [(gogoproto.stdtime) = true]; +} + +message ListScheduleMatchingTimesResponse { + repeated google.protobuf.Timestamp start_time = 1 [(gogoproto.stdtime) = true]; +} + +// (-- api-linter: core::0135::request-name-required=disabled +// aip.dev/not-precedent: DeleteSchedule doesn't follow Google API format --) +// (-- api-linter: core::0135::request-unknown-fields=disabled +// aip.dev/not-precedent: DeleteSchedule doesn't follow Google API format --) +message DeleteScheduleRequest { + // The namespace of the schedule to delete. + string namespace = 1; + // The id of the schedule to delete. + string schedule_id = 2; + // The identity of the client who initiated this request. + string identity = 3; +} + +message DeleteScheduleResponse { +} + +message ListSchedulesRequest { + // The namespace to list schedules in. + string namespace = 1; + // How many to return at once. + int32 maximum_page_size = 2; + // Token to get the next page of results. 
+ bytes next_page_token = 3; +} + +message ListSchedulesResponse { + repeated temporal.api.schedule.v1.ScheduleListEntry schedules = 1; + bytes next_page_token = 2; +} + +// (-- api-linter: core::0134::request-mask-required=disabled +// aip.dev/not-precedent: UpdateWorkerBuildIdCompatibilityRequest doesn't follow Google API format --) +// (-- api-linter: core::0134::request-resource-required=disabled +// aip.dev/not-precedent: UpdateWorkerBuildIdCompatibilityRequest RPC doesn't follow Google API format. --) +message UpdateWorkerBuildIdCompatibilityRequest { + message AddNewCompatibleVersion { + // A new id to be added to an existing compatible set. + string new_build_id = 1; + // A build id which must already exist in the version sets known by the task queue. The new + // id will be stored in the set containing this id, marking it as compatible with + // the versions within. + string existing_compatible_build_id = 2; + // When set, establishes the compatible set being targeted as the overall default for the + // queue. If a different set was the current default, the targeted set will replace it as + // the new default. + bool make_set_default = 3; + } + + message MergeSets { + // A build ID in the set whose default will become the merged set default + string primary_set_build_id = 1; + // A build ID in the set which will be merged into the primary set + string secondary_set_build_id = 2; + } + + string namespace = 1; + // Must be set, the task queue to apply changes to. Because all workers on a given task queue + // must have the same set of workflow & activity implementations, there is no reason to specify + // a task queue type here. + string task_queue = 2; + oneof operation { + // A new build id. This operation will create a new set which will be the new overall + // default version for the queue, with this id as its only member. This new set is + // incompatible with all previous sets/versions. 
+ // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: In makes perfect sense here. --) + string add_new_build_id_in_new_default_set = 3; + // Adds a new id to an existing compatible set, see sub-message definition for more. + AddNewCompatibleVersion add_new_compatible_build_id = 4; + // Promote an existing set to be the current default (if it isn't already) by targeting + // an existing build id within it. This field's value is the extant build id. + // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: Names are hard. --) + string promote_set_by_build_id = 5; + // Promote an existing build id within some set to be the current default for that set. + // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: Within makes perfect sense here. --) + string promote_build_id_within_set = 6; + // Merge two existing sets together, thus declaring all build IDs in both sets compatible + // with one another. The primary set's default will become the default for the merged set. + // This is useful if you've accidentally declared a new ID as incompatible you meant to + // declare as compatible. The unusual case of incomplete replication during failover could + // also result in a split set, which this operation can repair. + MergeSets merge_sets = 7; + } +} +message UpdateWorkerBuildIdCompatibilityResponse { + reserved 1; + reserved "version_set_id"; +} + +// (-- api-linter: core::0134::request-resource-required=disabled +// aip.dev/not-precedent: GetWorkerBuildIdCompatibilityRequest RPC doesn't follow Google API format. --) +message GetWorkerBuildIdCompatibilityRequest { + string namespace = 1; + // Must be set, the task queue to interrogate about worker id compatibility. + string task_queue = 2; + // Limits how many compatible sets will be returned. Specify 1 to only return the current + // default major version set. 0 returns all sets. 
+ int32 max_sets = 3; +} +message GetWorkerBuildIdCompatibilityResponse { + // Major version sets, in order from oldest to newest. The last element of the list will always + // be the current default major version. IE: New workflows will target the most recent version + // in that version set. + // + // There may be fewer sets returned than exist, if the request chose to limit this response. + repeated temporal.api.taskqueue.v1.CompatibleVersionSet major_version_sets = 1; +} + +message GetWorkerTaskReachabilityRequest { + string namespace = 1; + // Build ids to retrieve reachability for. An empty string will be interpreted as an unversioned worker. + // The number of build ids that can be queried in a single API call is limited. + // Open source users can adjust this limit by setting the server's dynamic config value for + // `limit.reachabilityQueryBuildIds` with the caveat that this call can strain the visibility store. + repeated string build_ids = 2; + + // Task queues to retrieve reachability for. Leave this empty to query for all task queues associated with given + // build ids in the namespace. + // Must specify at least one task queue if querying for an unversioned worker. + // The number of task queues that the server will fetch reachability information for is limited. + // See the `GetWorkerTaskReachabilityResponse` documentation for more information. + repeated string task_queues = 3; + + // Type of reachability to query for. + // `TASK_REACHABILITY_NEW_WORKFLOWS` is always returned in the response. + // Use `TASK_REACHABILITY_EXISTING_WORKFLOWS` if your application needs to respond to queries on closed workflows. + // Otherwise, use `TASK_REACHABILITY_OPEN_WORKFLOWS`. Default is `TASK_REACHABILITY_EXISTING_WORKFLOWS` if left + // unspecified. + // See the TaskReachability docstring for information about each enum variant. 
+ temporal.api.enums.v1.TaskReachability reachability = 4; +} + +message GetWorkerTaskReachabilityResponse { + // Task reachability, broken down by build id and then task queue. + // When requesting a large number of task queues or all task queues associated with the given build ids in a + // namespace, all task queues will be listed in the response but some of them may not contain reachability + // information due to a server enforced limit. When reaching the limit, task queues that reachability information + // could not be retrieved for will be marked with a single TASK_REACHABILITY_UNSPECIFIED entry. The caller may issue + // another call to get the reachability for those task queues. + // + // Open source users can adjust this limit by setting the server's dynamic config value for + // `limit.reachabilityTaskQueueScan` with the caveat that this call can strain the visibility store. + repeated temporal.api.taskqueue.v1.BuildIdReachability build_id_reachability = 1; +} + +// (-- api-linter: core::0134=disabled +// aip.dev/not-precedent: Update RPCs don't follow Google API format. --) +message UpdateWorkflowExecutionRequest { + // The namespace name of the target workflow + string namespace = 1; + // The target workflow id and (optionally) a specific run thereof + // (-- api-linter: core::0203::optional=disabled + // aip.dev/not-precedent: false positive triggered by the word "optional" --) + temporal.api.common.v1.WorkflowExecution workflow_execution = 2; + // If set, this call will error if the most recent (if no run id is set on + // `workflow_execution`), or specified (if it is) workflow execution is not + // part of the same execution chain as this id. + string first_execution_run_id = 3; + + // Describes when this request should return - basically whether the + // update is synchronous, asynchronous, or somewhere in between. 
+ temporal.api.update.v1.WaitPolicy wait_policy = 4; + + // The request information that will be delivered all the way down to the + // workflow execution. + temporal.api.update.v1.Request request = 5; +} + +message UpdateWorkflowExecutionResponse { + // Enough information for subsequent poll calls if needed. Never null. + temporal.api.update.v1.UpdateRef update_ref = 1; + + // The outcome of the update if and only if the workflow execution update + // has completed. If this response is being returned before the update has + // completed then this field will not be set. + temporal.api.update.v1.Outcome outcome = 2; + + // The most advanced lifecycle stage that the Update is known to have + // reached, where lifecycle stages are ordered + // UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_UNSPECIFIED < + // UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ADMITTED < + // UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ACCEPTED < + // UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_COMPLETED. + // UNSPECIFIED will be returned if and only if the server's maximum wait + // time was reached before the Update reached the stage specified in the + // request WaitPolicy, and before the context deadline expired; clients + // may then retry the call as needed. 
+ temporal.api.enums.v1.UpdateWorkflowExecutionLifecycleStage stage = 3; +} + +message StartBatchOperationRequest { + // Namespace that contains the batch operation + string namespace = 1; + // Visibility query defines the group of workflows to apply the batch operation + // This field and `executions` are mutually exclusive + string visibility_query = 2; + // Job ID defines the unique ID for the batch job + string job_id = 3; + // Reason to perform the batch operation + string reason = 4; + // Executions to apply the batch operation + // This field and `visibility_query` are mutually exclusive + repeated temporal.api.common.v1.WorkflowExecution executions = 5; + // Limit for the number of operations processed per second within this batch. + // Its purpose is to reduce the stress on the system caused by batch operations, which helps to prevent system + // overload and minimize potential delays in executing ongoing tasks for user workers. + // Note that when no explicit limit is provided, the server will operate according to its limit defined by the + // dynamic configuration key `worker.batcherRPS`. This also applies if the value in this field exceeds the + // server's configured limit. 
+ float max_operations_per_second = 6; + // Operation input + oneof operation { + temporal.api.batch.v1.BatchOperationTermination termination_operation = 10; + temporal.api.batch.v1.BatchOperationSignal signal_operation = 11; + temporal.api.batch.v1.BatchOperationCancellation cancellation_operation = 12; + temporal.api.batch.v1.BatchOperationDeletion deletion_operation = 13; + temporal.api.batch.v1.BatchOperationReset reset_operation = 14; + } +} + +message StartBatchOperationResponse { +} + +message StopBatchOperationRequest { + // Namespace that contains the batch operation + string namespace = 1; + // Batch job id + string job_id = 2; + // Reason to stop a batch operation + string reason = 3; + // Identity of the operator + string identity = 4; +} + +message StopBatchOperationResponse { +} + +message DescribeBatchOperationRequest { + // Namespace that contains the batch operation + string namespace = 1; + // Batch job id + string job_id = 2; +} + +message DescribeBatchOperationResponse { + // Batch operation type + temporal.api.enums.v1.BatchOperationType operation_type = 1; + // Batch job ID + string job_id = 2; + // Batch operation state + temporal.api.enums.v1.BatchOperationState state = 3; + // Batch operation start time + google.protobuf.Timestamp start_time = 4 [(gogoproto.stdtime) = true]; + // Batch operation close time + google.protobuf.Timestamp close_time = 5 [(gogoproto.stdtime) = true]; + // Total operation count + int64 total_operation_count = 6; + // Complete operation count + int64 complete_operation_count = 7; + // Failure operation count + int64 failure_operation_count = 8; + // Identity indicates the operator identity + string identity = 9; + // Reason indicates the reason to stop an operation + string reason = 10; +} + +message ListBatchOperationsRequest { + // Namespace that contains the batch operation + string namespace = 1; + // List page size + int32 page_size = 2; + // Next page token + bytes next_page_token = 3; +} + +message 
ListBatchOperationsResponse { + // BatchOperationInfo contains the basic info about batch operation + repeated temporal.api.batch.v1.BatchOperationInfo operation_info = 1; + bytes next_page_token = 2; +} + +message PollWorkflowExecutionUpdateRequest { + // The namespace of the workflow execution to which the update was + // originally issued. + string namespace = 1; + // The update reference returned in the initial + // UpdateWorkflowExecutionResponse + temporal.api.update.v1.UpdateRef update_ref = 2; + // The identity of the worker/client who is polling this update outcome + string identity = 3; + // Describes when this poll request should return a response. + // Omit to request a non-blocking poll. + temporal.api.update.v1.WaitPolicy wait_policy = 4; +} + +message PollWorkflowExecutionUpdateResponse { + // The outcome of the update if and only if the update has completed. If + // this response is being returned before the update has completed (e.g. due + // to the specification of a wait policy that only waits on + // UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ACCEPTED) then this field will + // not be set. + temporal.api.update.v1.Outcome outcome = 1; + // The most advanced lifecycle stage that the Update is known to have + // reached, where lifecycle stages are ordered + // UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_UNSPECIFIED < + // UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ADMITTED < + // UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_ACCEPTED < + // UPDATE_WORKFLOW_EXECUTION_LIFECYCLE_STAGE_COMPLETED. + // UNSPECIFIED will be returned if and only if the server's maximum wait + // time was reached before the Update reached the stage specified in the + // request WaitPolicy, and before the context deadline expired; clients + // may then retry the call as needed. + temporal.api.enums.v1.UpdateWorkflowExecutionLifecycleStage stage = 2; + // Sufficient information to address this update. 
+ temporal.api.update.v1.UpdateRef update_ref = 3; +} diff --git a/.temporal-api/temporal/api/workflowservice/v1/service.proto b/.temporal-api/temporal/api/workflowservice/v1/service.proto new file mode 100644 index 0000000..2bd2cf3 --- /dev/null +++ b/.temporal-api/temporal/api/workflowservice/v1/service.proto @@ -0,0 +1,606 @@ +// The MIT License +// +// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +syntax = "proto3"; + +package temporal.api.workflowservice.v1; + +option go_package = "go.temporal.io/api/workflowservice/v1;workflowservice"; +option java_package = "io.temporal.api.workflowservice.v1"; +option java_multiple_files = true; +option java_outer_classname = "ServiceProto"; +option ruby_package = "Temporalio::Api::WorkflowService::V1"; +option csharp_namespace = "Temporalio.Api.WorkflowService.V1"; + + +import "temporal/api/workflowservice/v1/request_response.proto"; +import "google/api/annotations.proto"; + +// WorkflowService API defines how Temporal SDKs and other clients interact with the Temporal server +// to create and interact with workflows and activities. +// +// Users are expected to call `StartWorkflowExecution` to create a new workflow execution. +// +// To drive workflows, a worker using a Temporal SDK must exist which regularly polls for workflow +// and activity tasks from the service. For each workflow task, the sdk must process the +// (incremental or complete) event history and respond back with any newly generated commands. +// +// For each activity task, the worker is expected to execute the user's code which implements that +// activity, responding with completion or failure. +service WorkflowService { + + // RegisterNamespace creates a new namespace which can be used as a container for all resources. + // + // A Namespace is a top level entity within Temporal, and is used as a container for resources + // like workflow executions, task queues, etc. A Namespace acts as a sandbox and provides + // isolation for all resources within the namespace. All resources belongs to exactly one + // namespace. + rpc RegisterNamespace (RegisterNamespaceRequest) returns (RegisterNamespaceResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces" + body: "*" + }; + } + + // DescribeNamespace returns the information and configuration for a registered namespace. 
+ rpc DescribeNamespace (DescribeNamespaceRequest) returns (DescribeNamespaceResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}" + }; + } + + // ListNamespaces returns the information and configuration for all namespaces. + rpc ListNamespaces (ListNamespacesRequest) returns (ListNamespacesResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces" + }; + } + + // UpdateNamespace is used to update the information and configuration of a registered + // namespace. + rpc UpdateNamespace (UpdateNamespaceRequest) returns (UpdateNamespaceResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/update" + body: "*" + }; + } + + // DeprecateNamespace is used to update the state of a registered namespace to DEPRECATED. + // + // Once the namespace is deprecated it cannot be used to start new workflow executions. Existing + // workflow executions will continue to run on deprecated namespaces. + // Deprecated. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: Deprecated --) + rpc DeprecateNamespace (DeprecateNamespaceRequest) returns (DeprecateNamespaceResponse) { + } + + // StartWorkflowExecution starts a new workflow execution. + // + // It will create the execution with a `WORKFLOW_EXECUTION_STARTED` event in its history and + // also schedule the first workflow task. Returns `WorkflowExecutionAlreadyStarted`, if an + // instance already exists with same workflow id. + rpc StartWorkflowExecution (StartWorkflowExecutionRequest) returns (StartWorkflowExecutionResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/workflows/{workflow_id}" + body: "*" + }; + } + + // GetWorkflowExecutionHistory returns the history of specified workflow execution. Fails with + // `NotFound` if the specified workflow execution is unknown to the service. 
+ rpc GetWorkflowExecutionHistory (GetWorkflowExecutionHistoryRequest) returns (GetWorkflowExecutionHistoryResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/workflows/{execution.workflow_id}/history" + }; + } + + // GetWorkflowExecutionHistoryReverse returns the history of specified workflow execution in reverse + // order (starting from last event). Fails with `NotFound` if the specified workflow execution is + // unknown to the service. + rpc GetWorkflowExecutionHistoryReverse (GetWorkflowExecutionHistoryReverseRequest) returns (GetWorkflowExecutionHistoryReverseResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/workflows/{execution.workflow_id}/history-reverse" + }; + } + + // PollWorkflowTaskQueue is called by workers to make progress on workflows. + // + // A WorkflowTask is dispatched to callers for active workflow executions with pending workflow + // tasks. The worker is expected to call `RespondWorkflowTaskCompleted` when it is done + // processing the task. The service will create a `WorkflowTaskStarted` event in the history for + // this task before handing it to the worker. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) + rpc PollWorkflowTaskQueue (PollWorkflowTaskQueueRequest) returns (PollWorkflowTaskQueueResponse) { + } + + // RespondWorkflowTaskCompleted is called by workers to successfully complete workflow tasks + // they received from `PollWorkflowTaskQueue`. + // + // Completing a WorkflowTask will write a `WORKFLOW_TASK_COMPLETED` event to the workflow's + // history, along with events corresponding to whatever commands the SDK generated while + // executing the task (ex timer started, activity task scheduled, etc). + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. 
--) + rpc RespondWorkflowTaskCompleted (RespondWorkflowTaskCompletedRequest) returns (RespondWorkflowTaskCompletedResponse) { + } + + // RespondWorkflowTaskFailed is called by workers to indicate the processing of a workflow task + // failed. + // + // This results in a `WORKFLOW_TASK_FAILED` event written to the history, and a new workflow + // task will be scheduled. This API can be used to report unhandled failures resulting from + // applying the workflow task. + // + // Temporal will only append first WorkflowTaskFailed event to the history of workflow execution + // for consecutive failures. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) + rpc RespondWorkflowTaskFailed (RespondWorkflowTaskFailedRequest) returns (RespondWorkflowTaskFailedResponse) { + } + + // PollActivityTaskQueue is called by workers to process activity tasks from a specific task + // queue. + // + // The worker is expected to call one of the `RespondActivityTaskXXX` methods when it is done + // processing the task. + // + // An activity task is dispatched whenever a `SCHEDULE_ACTIVITY_TASK` command is produced during + // workflow execution. An in memory `ACTIVITY_TASK_STARTED` event is written to mutable state + // before the task is dispatched to the worker. The started event, and the final event + // (`ACTIVITY_TASK_COMPLETED` / `ACTIVITY_TASK_FAILED` / `ACTIVITY_TASK_TIMED_OUT`) will both be + // written permanently to Workflow execution history when Activity is finished. This is done to + // avoid writing many events in the case of a failure/retry loop. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) + rpc PollActivityTaskQueue (PollActivityTaskQueueRequest) returns (PollActivityTaskQueueResponse) { + } + + // RecordActivityTaskHeartbeat is optionally called by workers while they execute activities. 
+ // + // If worker fails to heartbeat within the `heartbeat_timeout` interval for the activity task, + // then it will be marked as timed out and an `ACTIVITY_TASK_TIMED_OUT` event will be written to + // the workflow history. Calling `RecordActivityTaskHeartbeat` will fail with `NotFound` in + // such situations, in that event, the SDK should request cancellation of the activity. + rpc RecordActivityTaskHeartbeat (RecordActivityTaskHeartbeatRequest) returns (RecordActivityTaskHeartbeatResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/activities/heartbeat" + body: "*" + }; + } + + // See `RecordActivityTaskHeartbeat`. This version allows clients to record heartbeats by + // namespace/workflow id/activity id instead of task token. + // + // (-- api-linter: core::0136::prepositions=disabled + // aip.dev/not-precedent: "By" is used to indicate request type. --) + rpc RecordActivityTaskHeartbeatById (RecordActivityTaskHeartbeatByIdRequest) returns (RecordActivityTaskHeartbeatByIdResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/activities/heartbeat-by-id" + body: "*" + }; + } + + // RespondActivityTaskCompleted is called by workers when they successfully complete an activity + // task. + // + // This results in a new `ACTIVITY_TASK_COMPLETED` event being written to the workflow history + // and a new workflow task created for the workflow. Fails with `NotFound` if the task token is + // no longer valid due to activity timeout, already being completed, or never having existed. + rpc RespondActivityTaskCompleted (RespondActivityTaskCompletedRequest) returns (RespondActivityTaskCompletedResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/activities/complete" + body: "*" + }; + } + + // See `RespondActivityTaskCompleted`. This version allows clients to record completions by + // namespace/workflow id/activity id instead of task token. 
+ // + // (-- api-linter: core::0136::prepositions=disabled + // aip.dev/not-precedent: "By" is used to indicate request type. --) + rpc RespondActivityTaskCompletedById (RespondActivityTaskCompletedByIdRequest) returns (RespondActivityTaskCompletedByIdResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/activities/complete-by-id" + body: "*" + }; + } + + // RespondActivityTaskFailed is called by workers when processing an activity task fails. + // + // This results in a new `ACTIVITY_TASK_FAILED` event being written to the workflow history and + // a new workflow task created for the workflow. Fails with `NotFound` if the task token is no + // longer valid due to activity timeout, already being completed, or never having existed. + rpc RespondActivityTaskFailed (RespondActivityTaskFailedRequest) returns (RespondActivityTaskFailedResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/activities/fail" + body: "*" + }; + } + + // See `RespondActivityTaskFailed`. This version allows clients to record failures by + // namespace/workflow id/activity id instead of task token. + // + // (-- api-linter: core::0136::prepositions=disabled + // aip.dev/not-precedent: "By" is used to indicate request type. --) + rpc RespondActivityTaskFailedById (RespondActivityTaskFailedByIdRequest) returns (RespondActivityTaskFailedByIdResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/activities/fail-by-id" + body: "*" + }; + } + + // RespondActivityTaskCanceled is called by workers when they cancel an activity task. + // + // This results in a new `ACTIVITY_TASK_CANCELED` event being written to the workflow history + // and a new workflow task created for the workflow. Fails with `NotFound` if the task token is + // no longer valid due to activity timeout, already being completed, or never having existed. 
+ rpc RespondActivityTaskCanceled (RespondActivityTaskCanceledRequest) returns (RespondActivityTaskCanceledResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/activities/cancel" + body: "*" + }; + } + + // See `RespondActivityTaskCanceled`. This version allows clients to record cancellations by + // namespace/workflow id/activity id instead of task token. + // + // (-- api-linter: core::0136::prepositions=disabled + // aip.dev/not-precedent: "By" is used to indicate request type. --) + rpc RespondActivityTaskCanceledById (RespondActivityTaskCanceledByIdRequest) returns (RespondActivityTaskCanceledByIdResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/activities/cancel-by-id" + body: "*" + }; + } + + // RequestCancelWorkflowExecution is called by workers when they want to request cancellation of + // a workflow execution. + // + // This results in a new `WORKFLOW_EXECUTION_CANCEL_REQUESTED` event being written to the + // workflow history and a new workflow task created for the workflow. It returns success if the requested + // workflow is already closed. It fails with 'NotFound' if the requested workflow doesn't exist. + rpc RequestCancelWorkflowExecution (RequestCancelWorkflowExecutionRequest) returns (RequestCancelWorkflowExecutionResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/workflows/{workflow_execution.workflow_id}/cancel" + body: "*" + }; + } + + // SignalWorkflowExecution is used to send a signal to a running workflow execution. + // + // This results in a `WORKFLOW_EXECUTION_SIGNALED` event recorded in the history and a workflow + // task being created for the execution. 
+ rpc SignalWorkflowExecution (SignalWorkflowExecutionRequest) returns (SignalWorkflowExecutionResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/workflows/{workflow_execution.workflow_id}/signal/{signal_name}" + body: "*" + }; + } + + // SignalWithStartWorkflowExecution is used to ensure a signal is sent to a workflow, even if + // it isn't yet started. + // + // If the workflow is running, a `WORKFLOW_EXECUTION_SIGNALED` event is recorded in the history + // and a workflow task is generated. + // + // If the workflow is not running or not found, then the workflow is created with + // `WORKFLOW_EXECUTION_STARTED` and `WORKFLOW_EXECUTION_SIGNALED` events in its history, and a + // workflow task is generated. + // + // (-- api-linter: core::0136::prepositions=disabled + // aip.dev/not-precedent: "With" is used to indicate combined operation. --) + rpc SignalWithStartWorkflowExecution (SignalWithStartWorkflowExecutionRequest) returns (SignalWithStartWorkflowExecutionResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/workflows/{workflow_id}/signal-with-start/{signal_name}" + body: "*" + }; + } + + // ResetWorkflowExecution will reset an existing workflow execution to a specified + // `WORKFLOW_TASK_COMPLETED` event (exclusive). It will immediately terminate the current + // execution instance. + // TODO: Does exclusive here mean *just* the completed event, or also WFT started? Otherwise the task is doomed to time out? + rpc ResetWorkflowExecution (ResetWorkflowExecutionRequest) returns (ResetWorkflowExecutionResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/workflows/{workflow_execution.workflow_id}/reset" + body: "*" + }; + } + + // TerminateWorkflowExecution terminates an existing workflow execution by recording a + // `WORKFLOW_EXECUTION_TERMINATED` event in the history and immediately terminating the + // execution instance. 
+ rpc TerminateWorkflowExecution (TerminateWorkflowExecutionRequest) returns (TerminateWorkflowExecutionResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/workflows/{workflow_execution.workflow_id}/terminate" + body: "*" + }; + } + + // DeleteWorkflowExecution asynchronously deletes a specific Workflow Execution (when + // WorkflowExecution.run_id is provided) or the latest Workflow Execution (when + // WorkflowExecution.run_id is not provided). If the Workflow Execution is Running, it will be + // terminated before deletion. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: Workflow deletion not exposed to HTTP, users should use cancel or terminate. --) + rpc DeleteWorkflowExecution (DeleteWorkflowExecutionRequest) returns (DeleteWorkflowExecutionResponse) {} + + // ListOpenWorkflowExecutions is a visibility API to list the open executions in a specific namespace. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: HTTP users should use ListWorkflowExecutions instead. --) + rpc ListOpenWorkflowExecutions (ListOpenWorkflowExecutionsRequest) returns (ListOpenWorkflowExecutionsResponse) {} + + // ListClosedWorkflowExecutions is a visibility API to list the closed executions in a specific namespace. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: HTTP users should use ListWorkflowExecutions instead. --) + rpc ListClosedWorkflowExecutions (ListClosedWorkflowExecutionsRequest) returns (ListClosedWorkflowExecutionsResponse) {} + + // ListWorkflowExecutions is a visibility API to list workflow executions in a specific namespace. 
+ rpc ListWorkflowExecutions (ListWorkflowExecutionsRequest) returns (ListWorkflowExecutionsResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/workflows" + }; + } + + // ListArchivedWorkflowExecutions is a visibility API to list archived workflow executions in a specific namespace. + rpc ListArchivedWorkflowExecutions (ListArchivedWorkflowExecutionsRequest) returns (ListArchivedWorkflowExecutionsResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/archived-workflows" + }; + } + + // ScanWorkflowExecutions is a visibility API to list a large amount of workflow executions in a specific namespace without order. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: HTTP users should use ListWorkflowExecutions instead. --) + rpc ScanWorkflowExecutions (ScanWorkflowExecutionsRequest) returns (ScanWorkflowExecutionsResponse) { + } + + // CountWorkflowExecutions is a visibility API to count workflow executions in a specific namespace. + rpc CountWorkflowExecutions (CountWorkflowExecutionsRequest) returns (CountWorkflowExecutionsResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/workflow-count" + }; + } + + // GetSearchAttributes is a visibility API to get all legal keys that could be used in list APIs + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose this search attribute API to HTTP (but may expose on OperatorService). --) + rpc GetSearchAttributes (GetSearchAttributesRequest) returns (GetSearchAttributesResponse) {} + + // RespondQueryTaskCompleted is called by workers to complete queries which were delivered on + // the `query` (not `queries`) field of a `PollWorkflowTaskQueueResponse`. + // + // Completing the query will unblock the corresponding client call to `QueryWorkflow` and return + // the query result as a response. 
+ // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) + rpc RespondQueryTaskCompleted (RespondQueryTaskCompletedRequest) returns (RespondQueryTaskCompletedResponse) {} + + // ResetStickyTaskQueue resets the sticky task queue related information in the mutable state of + // a given workflow. This is prudent for workers to perform if a workflow has been paged out of + // their cache. + // + // Things cleared are: + // 1. StickyTaskQueue + // 2. StickyScheduleToStartTimeout + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose worker API to HTTP. --) + rpc ResetStickyTaskQueue (ResetStickyTaskQueueRequest) returns (ResetStickyTaskQueueResponse) { + } + + // QueryWorkflow requests a query be executed for a specified workflow execution. + rpc QueryWorkflow (QueryWorkflowRequest) returns (QueryWorkflowResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/workflows/{execution.workflow_id}/query/{query.query_type}" + body: "*" + }; + } + + // DescribeWorkflowExecution returns information about the specified workflow execution. + rpc DescribeWorkflowExecution (DescribeWorkflowExecutionRequest) returns (DescribeWorkflowExecutionResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/workflows/{execution.workflow_id}" + }; + } + + // DescribeTaskQueue returns information about the target task queue. + rpc DescribeTaskQueue (DescribeTaskQueueRequest) returns (DescribeTaskQueueResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/task-queues/{task_queue.name}" + }; + } + + // GetClusterInfo returns information about temporal cluster + rpc GetClusterInfo(GetClusterInfoRequest) returns (GetClusterInfoResponse) { + option (google.api.http) = { + get: "/api/v1/cluster-info" + }; + } + + // GetSystemInfo returns information about the system. 
+ rpc GetSystemInfo(GetSystemInfoRequest) returns (GetSystemInfoResponse) { + option (google.api.http) = { + get: "/api/v1/system-info" + }; + } + + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not expose this low-level API to HTTP. --) + rpc ListTaskQueuePartitions(ListTaskQueuePartitionsRequest) returns (ListTaskQueuePartitionsResponse) { + } + + // Creates a new schedule. + rpc CreateSchedule (CreateScheduleRequest) returns (CreateScheduleResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/schedules/{schedule_id}" + body: "*" + }; + } + + // Returns the schedule description and current state of an existing schedule. + rpc DescribeSchedule (DescribeScheduleRequest) returns (DescribeScheduleResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/schedules/{schedule_id}" + }; + } + + // Changes the configuration or state of an existing schedule. + rpc UpdateSchedule (UpdateScheduleRequest) returns (UpdateScheduleResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/schedules/{schedule_id}/update" + body: "*" + }; + } + + // Makes a specific change to a schedule or triggers an immediate action. + rpc PatchSchedule (PatchScheduleRequest) returns (PatchScheduleResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/schedules/{schedule_id}/patch" + body: "*" + }; + } + + // Lists matching times within a range. + rpc ListScheduleMatchingTimes (ListScheduleMatchingTimesRequest) returns (ListScheduleMatchingTimesResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/schedules/{schedule_id}/matching-times" + }; + } + + // Deletes a schedule, removing it from the system. 
+ rpc DeleteSchedule (DeleteScheduleRequest) returns (DeleteScheduleResponse) { + option (google.api.http) = { + delete: "/api/v1/namespaces/{namespace}/schedules/{schedule_id}" + }; + } + + // List all schedules in a namespace. + rpc ListSchedules (ListSchedulesRequest) returns (ListSchedulesResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/schedules" + }; + } + + // Allows users to specify sets of worker build id versions on a per task queue basis. Versions + // are ordered, and may be either compatible with some extant version, or a new incompatible + // version, forming sets of ids which are incompatible with each other, but whose contained + // members are compatible with one another. + // + // A single build id may be mapped to multiple task queues using this API for cases where a single process hosts + // multiple workers. + // + // To query which workers can be retired, use the `GetWorkerTaskReachability` API. + // + // NOTE: The number of task queues mapped to a single build id is limited by the `limit.taskQueuesPerBuildId` + // (default is 20), if this limit is exceeded this API will error with a FailedPrecondition. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We do not yet expose versioning API to HTTP. --) + rpc UpdateWorkerBuildIdCompatibility (UpdateWorkerBuildIdCompatibilityRequest) returns (UpdateWorkerBuildIdCompatibilityResponse) {} + + // Fetches the worker build id versioning sets for a task queue. + rpc GetWorkerBuildIdCompatibility (GetWorkerBuildIdCompatibilityRequest) returns (GetWorkerBuildIdCompatibilityResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/task-queues/{task_queue}/worker-build-id-compatibility" + }; + } + + // Fetches task reachability to determine whether a worker may be retired. + // The request may specify task queues to query for or let the server fetch all task queues mapped to the given + // build IDs. 
+ // + // When requesting a large number of task queues or all task queues associated with the given build ids in a + // namespace, all task queues will be listed in the response but some of them may not contain reachability + // information due to a server enforced limit. When reaching the limit, task queues that reachability information + // could not be retrieved for will be marked with a single TASK_REACHABILITY_UNSPECIFIED entry. The caller may issue + // another call to get the reachability for those task queues. + // + // Open source users can adjust this limit by setting the server's dynamic config value for + // `limit.reachabilityTaskQueueScan` with the caveat that this call can strain the visibility store. + rpc GetWorkerTaskReachability (GetWorkerTaskReachabilityRequest) returns (GetWorkerTaskReachabilityResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/worker-task-reachability" + }; + } + + // Invokes the specified update function on user workflow code. + rpc UpdateWorkflowExecution(UpdateWorkflowExecutionRequest) returns (UpdateWorkflowExecutionResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/workflows/{workflow_execution.workflow_id}/update/{request.input.name}" + body: "*" + }; + } + + // Polls a workflow execution for the outcome of a workflow execution update + // previously issued through the UpdateWorkflowExecution RPC. The effective + // timeout on this call will be the shorter of the caller-supplied gRPC + // timeout and the server's configured long-poll timeout. + // + // (-- api-linter: core::0127::http-annotation=disabled + // aip.dev/not-precedent: We don't expose update polling API to HTTP in favor of a potential future non-blocking form. 
--) + rpc PollWorkflowExecutionUpdate(PollWorkflowExecutionUpdateRequest) returns (PollWorkflowExecutionUpdateResponse) { + } + + // StartBatchOperation starts a new batch operation + rpc StartBatchOperation(StartBatchOperationRequest) returns (StartBatchOperationResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/batch-operations/{job_id}" + body: "*" + }; + } + + // StopBatchOperation stops a batch operation + rpc StopBatchOperation(StopBatchOperationRequest) returns (StopBatchOperationResponse) { + option (google.api.http) = { + post: "/api/v1/namespaces/{namespace}/batch-operations/{job_id}/stop" + body: "*" + }; + } + + // DescribeBatchOperation returns the information about a batch operation + rpc DescribeBatchOperation(DescribeBatchOperationRequest) returns (DescribeBatchOperationResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/batch-operations/{job_id}" + }; + } + + // ListBatchOperations returns a list of batch operations + rpc ListBatchOperations(ListBatchOperationsRequest) returns (ListBatchOperationsResponse) { + option (google.api.http) = { + get: "/api/v1/namespaces/{namespace}/batch-operations" + }; + } +} diff --git a/Makefile b/Makefile index cf901cd..c97d5ff 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ ci-build: install proto install: buf-install grpc-install openapiv2-install # Run all linters and compile proto files. -proto: grpc +proto: buf-lint buf-breaking clean $(PROTO_OUT) proto-go proto-openapiv2 ######################################################################## ##### Variables ###### @@ -19,16 +19,48 @@ SHELL := PATH=$(GOBIN):$(PATH) /bin/sh COLOR := "\e[1;36m%s\e[0m\n" +PROTO_ROOT := . 
+TEMPORAL_API_ROOT := $(PROTO_ROOT)/.temporal-api +PROTO_FILES = $(shell find $(PROTO_ROOT)/temporal -name "*.proto") +SERVICE_PROTO_FILES = $(shell find $(PROTO_ROOT)/temporal -name "*service.proto") +PROTO_DIRS = $(sort $(dir $(PROTO_FILES))) +SERVICE_PROTO_DIRS = $(sort $(dir $(SERVICE_PROTO_FILES))) PROTO_OUT := .gen -$(PROTO_OUT): - mkdir $(PROTO_OUT) +PROTO_IMPORTS = -I=$(PROTO_ROOT) -I=$(TEMPORAL_API_ROOT) -##### Compile proto files for go ##### -grpc: buf-lint buf-breaking go-grpc +$(PROTO_OUT): clean + mkdir -p $(PROTO_OUT)/go + mkdir -p $(PROTO_OUT)/openapiv2 + +##### Copy the proto files from the temporal api repo ##### +copy-temporal-api: + @printf $(COLOR) "Copy temporal apis..." + rm -rf $(TEMPORAL_API_ROOT) + mkdir -p $(TEMPORAL_API_ROOT) + git clone git@github.com:temporalio/api --depth=1 --branch master --single-branch $(PROTO_ROOT)/.temporal-api-tmp + mv -f $(PROTO_ROOT)/.temporal-api-tmp/temporal $(TEMPORAL_API_ROOT)/temporal + mv -f $(PROTO_ROOT)/.temporal-api-tmp/dependencies $(TEMPORAL_API_ROOT)/dependencies + mv -f $(PROTO_ROOT)/.temporal-api-tmp/google $(TEMPORAL_API_ROOT)/google + rm -rf $(PROTO_ROOT)/.temporal-api-tmp -go-grpc: clean $(PROTO_OUT) +##### Compile proto files for go ##### +proto-go: printf $(COLOR) "Compile for go-gRPC..." 
- buf generate --output $(PROTO_OUT) + $(foreach PROTO_DIR,$(PROTO_DIRS),protoc \ + --fatal_warnings \ + $(PROTO_IMPORTS) \ + --go_out $(PROTO_OUT)/go --go_opt paths=source_relative \ + --go-grpc_out $(PROTO_OUT)/go --go-grpc_opt paths=source_relative \ + $(PROTO_DIR)*.proto;) + +##### Compile proto files for openapiv2 ##### +proto-openapiv2: + $(foreach SERVICE_PROTO_DIR,$(SERVICE_PROTO_DIRS),protoc \ + --fatal_warnings \ + $(PROTO_IMPORTS) \ + --openapiv2_out $(PROTO_OUT)/openapiv2 --openapiv2_opt output_format=yaml --openapiv2_opt allow_delete_body \ + $(SERVICE_PROTO_DIR)*service.proto;) + ##### Plugins & tools ##### buf-install: @@ -51,9 +83,9 @@ buf-lint: buf-breaking: @printf $(COLOR) "Run buf breaking changes check against main branch..." - buf breaking --against 'https://github.com/temporalio/api-cloud.git#branch=main' + buf breaking --against 'https://github.com/temporalio/temporal.git#branch=main' ##### Clean ##### clean: - printf $(COLOR) "Delete generated go files..." + printf $(COLOR) "Delete generated files..." rm -rf $(PROTO_OUT) diff --git a/buf.gen.yaml b/buf.gen.yaml deleted file mode 100644 index ea5bdf5..0000000 --- a/buf.gen.yaml +++ /dev/null @@ -1,14 +0,0 @@ -version: v1 -plugins: - - plugin: go - out: go - opt: paths=source_relative - - plugin: go-grpc - out: go - opt: - - paths=source_relative - - plugin: openapiv2 - out: openapiv2 - opt: - - output_format=yaml - - allow_delete_body diff --git a/buf.lock b/buf.lock index 3435191..c91b581 100644 --- a/buf.lock +++ b/buf.lock @@ -1,8 +1,2 @@ # Generated by buf. DO NOT EDIT. 
version: v1 -deps: - - remote: buf.build - owner: googleapis - repository: googleapis - commit: 711e289f6a384c4caeebaff7c6931ade - digest: shake256:e08fb55dad7469f69df00304eed31427d2d1576e9aab31e6bf86642688e04caaf0372f15fe6974cf79432779a635b3ea401ca69c943976dc42749524e4c25d94 diff --git a/buf.yaml b/buf.yaml index 69baf0f..b6c72c8 100644 --- a/buf.yaml +++ b/buf.yaml @@ -1,9 +1,11 @@ version: v1 deps: - - buf.build/googleapis/googleapis breaking: use: - FILE lint: use: - DEFAULT + ignore: + - .temporal-api + diff --git a/temporal/api/cloud/export/v1/message.proto b/temporal/api/cloud/export/v1/message.proto new file mode 100644 index 0000000..3cdd33b --- /dev/null +++ b/temporal/api/cloud/export/v1/message.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package temporal.api.cloud.export.v1; + +option go_package = "go.temporal.io/api/cloud/export/v1;export"; + +import "temporal/api/history/v1/message.proto"; + +message ExportedWorkflows { + repeated ExportedWorkflow workflows = 1; +} + +message ExportedWorkflow { + temporal.api.history.v1.History history = 1; +}