Vendor dependencies for Bigtable client.

Riley Karson 2017-06-09 17:23:19 -07:00
parent 38d7ba566c
commit 5a71aa3649
128 changed files with 25677 additions and 1901 deletions

371
vendor/cloud.google.com/go/bigtable/admin.go generated vendored Normal file

@@ -0,0 +1,371 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bigtable
import (
"fmt"
"regexp"
"strings"
btopt "cloud.google.com/go/bigtable/internal/option"
"cloud.google.com/go/longrunning"
lroauto "cloud.google.com/go/longrunning/autogen"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
const adminAddr = "bigtableadmin.googleapis.com:443"
// AdminClient is a client type for performing admin operations within a specific instance.
type AdminClient struct {
conn *grpc.ClientConn
tClient btapb.BigtableTableAdminClient
project, instance string
// Metadata to be sent with each request.
md metadata.MD
}
// NewAdminClient creates a new AdminClient for a given project and instance.
func NewAdminClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*AdminClient, error) {
o, err := btopt.DefaultClientOptions(adminAddr, AdminScope, clientUserAgent)
if err != nil {
return nil, err
}
o = append(o, opts...)
conn, err := transport.DialGRPC(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
return &AdminClient{
conn: conn,
tClient: btapb.NewBigtableTableAdminClient(conn),
project: project,
instance: instance,
md: metadata.Pairs(resourcePrefixHeader, fmt.Sprintf("projects/%s/instances/%s", project, instance)),
}, nil
}
// Close closes the AdminClient.
func (ac *AdminClient) Close() error {
return ac.conn.Close()
}
func (ac *AdminClient) instancePrefix() string {
return fmt.Sprintf("projects/%s/instances/%s", ac.project, ac.instance)
}
// Tables returns a list of the tables in the instance.
func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) {
ctx = mergeOutgoingMetadata(ctx, ac.md)
prefix := ac.instancePrefix()
req := &btapb.ListTablesRequest{
Parent: prefix,
}
res, err := ac.tClient.ListTables(ctx, req)
if err != nil {
return nil, err
}
names := make([]string, 0, len(res.Tables))
for _, tbl := range res.Tables {
names = append(names, strings.TrimPrefix(tbl.Name, prefix+"/tables/"))
}
return names, nil
}
// CreateTable creates a new table in the instance.
// This method may return before the table's creation is complete.
func (ac *AdminClient) CreateTable(ctx context.Context, table string) error {
ctx = mergeOutgoingMetadata(ctx, ac.md)
prefix := ac.instancePrefix()
req := &btapb.CreateTableRequest{
Parent: prefix,
TableId: table,
}
_, err := ac.tClient.CreateTable(ctx, req)
return err
}
// CreatePresplitTable creates a new table in the instance.
// The list of row keys will be used to initially split the table into multiple tablets.
// Given two split keys, "s1" and "s2", three tablets will be created,
// spanning the key ranges: [, s1), [s1, s2), [s2, ).
// This method may return before the table's creation is complete.
func (ac *AdminClient) CreatePresplitTable(ctx context.Context, table string, split_keys []string) error {
var req_splits []*btapb.CreateTableRequest_Split
for _, split := range split_keys {
req_splits = append(req_splits, &btapb.CreateTableRequest_Split{[]byte(split)})
}
ctx = mergeOutgoingMetadata(ctx, ac.md)
prefix := ac.instancePrefix()
req := &btapb.CreateTableRequest{
Parent: prefix,
TableId: table,
InitialSplits: req_splits,
}
_, err := ac.tClient.CreateTable(ctx, req)
return err
}
// CreateColumnFamily creates a new column family in a table.
func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family string) error {
// TODO(dsymonds): Permit specifying gcexpr and any other family settings.
ctx = mergeOutgoingMetadata(ctx, ac.md)
prefix := ac.instancePrefix()
req := &btapb.ModifyColumnFamiliesRequest{
Name: prefix + "/tables/" + table,
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
Id: family,
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
}},
}
_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
return err
}
// DeleteTable deletes a table and all of its data.
func (ac *AdminClient) DeleteTable(ctx context.Context, table string) error {
ctx = mergeOutgoingMetadata(ctx, ac.md)
prefix := ac.instancePrefix()
req := &btapb.DeleteTableRequest{
Name: prefix + "/tables/" + table,
}
_, err := ac.tClient.DeleteTable(ctx, req)
return err
}
// DeleteColumnFamily deletes a column family in a table and all of its data.
func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family string) error {
ctx = mergeOutgoingMetadata(ctx, ac.md)
prefix := ac.instancePrefix()
req := &btapb.ModifyColumnFamiliesRequest{
Name: prefix + "/tables/" + table,
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
Id: family,
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Drop{true},
}},
}
_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
return err
}
// TableInfo represents information about a table.
type TableInfo struct {
// DEPRECATED - This field is deprecated. Please use FamilyInfos instead.
Families []string
FamilyInfos []FamilyInfo
}
// FamilyInfo represents information about a column family.
type FamilyInfo struct {
Name string
GCPolicy string
}
// TableInfo retrieves information about a table.
func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, error) {
ctx = mergeOutgoingMetadata(ctx, ac.md)
prefix := ac.instancePrefix()
req := &btapb.GetTableRequest{
Name: prefix + "/tables/" + table,
}
res, err := ac.tClient.GetTable(ctx, req)
if err != nil {
return nil, err
}
ti := &TableInfo{}
for name, fam := range res.ColumnFamilies {
ti.Families = append(ti.Families, name)
ti.FamilyInfos = append(ti.FamilyInfos, FamilyInfo{Name: name, GCPolicy: GCRuleToString(fam.GcRule)})
}
return ti, nil
}
// SetGCPolicy specifies which cells in a column family should be garbage collected.
// GC executes opportunistically in the background; table reads may return data
// matching the GC policy.
func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error {
ctx = mergeOutgoingMetadata(ctx, ac.md)
prefix := ac.instancePrefix()
req := &btapb.ModifyColumnFamiliesRequest{
Name: prefix + "/tables/" + table,
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
Id: family,
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{&btapb.ColumnFamily{GcRule: policy.proto()}},
}},
}
_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
return err
}
// DropRowRange permanently deletes a row range from the specified table.
func (ac *AdminClient) DropRowRange(ctx context.Context, table, rowKeyPrefix string) error {
ctx = mergeOutgoingMetadata(ctx, ac.md)
prefix := ac.instancePrefix()
req := &btapb.DropRowRangeRequest{
Name: prefix + "/tables/" + table,
Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte(rowKeyPrefix)},
}
_, err := ac.tClient.DropRowRange(ctx, req)
return err
}
const instanceAdminAddr = "bigtableadmin.googleapis.com:443"
// InstanceAdminClient is a client type for performing admin operations on instances.
// These operations can be substantially more dangerous than those provided by AdminClient.
type InstanceAdminClient struct {
conn *grpc.ClientConn
iClient btapb.BigtableInstanceAdminClient
lroClient *lroauto.OperationsClient
project string
// Metadata to be sent with each request.
md metadata.MD
}
// NewInstanceAdminClient creates a new InstanceAdminClient for a given project.
func NewInstanceAdminClient(ctx context.Context, project string, opts ...option.ClientOption) (*InstanceAdminClient, error) {
o, err := btopt.DefaultClientOptions(instanceAdminAddr, InstanceAdminScope, clientUserAgent)
if err != nil {
return nil, err
}
o = append(o, opts...)
conn, err := transport.DialGRPC(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
lroClient, err := lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
if err != nil {
// This error "should not happen", since we are just reusing an existing connection
// and never actually need to dial.
// If this does happen, we could leak conn. However, we cannot close conn:
// If the user invoked the function with option.WithGRPCConn,
// we would close a connection that's still in use.
// TODO(pongad): investigate error conditions.
return nil, err
}
return &InstanceAdminClient{
conn: conn,
iClient: btapb.NewBigtableInstanceAdminClient(conn),
lroClient: lroClient,
project: project,
md: metadata.Pairs(resourcePrefixHeader, "projects/"+project),
}, nil
}
// Close closes the InstanceAdminClient.
func (iac *InstanceAdminClient) Close() error {
return iac.conn.Close()
}
// StorageType is the type of storage used for all tables in an instance
type StorageType int
const (
SSD StorageType = iota
HDD
)
func (st StorageType) proto() btapb.StorageType {
if st == HDD {
return btapb.StorageType_HDD
}
return btapb.StorageType_SSD
}
// InstanceInfo represents information about an instance
type InstanceInfo struct {
Name string // name of the instance
DisplayName string // display name for UIs
}
// InstanceConf contains the information necessary to create an Instance
type InstanceConf struct {
InstanceId, DisplayName, ClusterId, Zone string
NumNodes int32
StorageType StorageType
}
var instanceNameRegexp = regexp.MustCompile(`^projects/([^/]+)/instances/([a-z][-a-z0-9]*)$`)
// CreateInstance creates a new instance in the project.
// This method will return when the instance has been created or when an error occurs.
func (iac *InstanceAdminClient) CreateInstance(ctx context.Context, conf *InstanceConf) error {
ctx = mergeOutgoingMetadata(ctx, iac.md)
req := &btapb.CreateInstanceRequest{
Parent: "projects/" + iac.project,
InstanceId: conf.InstanceId,
Instance: &btapb.Instance{DisplayName: conf.DisplayName},
Clusters: map[string]*btapb.Cluster{
conf.ClusterId: {
ServeNodes: conf.NumNodes,
DefaultStorageType: conf.StorageType.proto(),
Location: "projects/" + iac.project + "/locations/" + conf.Zone,
},
},
}
lro, err := iac.iClient.CreateInstance(ctx, req)
if err != nil {
return err
}
resp := btapb.Instance{}
return longrunning.InternalNewOperation(iac.lroClient, lro).Wait(ctx, &resp)
}
// DeleteInstance deletes an instance from the project.
func (iac *InstanceAdminClient) DeleteInstance(ctx context.Context, instanceId string) error {
ctx = mergeOutgoingMetadata(ctx, iac.md)
req := &btapb.DeleteInstanceRequest{"projects/" + iac.project + "/instances/" + instanceId}
_, err := iac.iClient.DeleteInstance(ctx, req)
return err
}
// Instances returns a list of instances in the project.
func (iac *InstanceAdminClient) Instances(ctx context.Context) ([]*InstanceInfo, error) {
ctx = mergeOutgoingMetadata(ctx, iac.md)
req := &btapb.ListInstancesRequest{
Parent: "projects/" + iac.project,
}
res, err := iac.iClient.ListInstances(ctx, req)
if err != nil {
return nil, err
}
var is []*InstanceInfo
for _, i := range res.Instances {
m := instanceNameRegexp.FindStringSubmatch(i.Name)
if m == nil {
return nil, fmt.Errorf("malformed instance name %q", i.Name)
}
is = append(is, &InstanceInfo{
Name: m[2],
DisplayName: i.DisplayName,
})
}
return is, nil
}
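
A minimal usage sketch of the admin API above; the project, instance, and table names are placeholders, and error handling is reduced to log.Fatalf.

package main

import (
    "log"

    "cloud.google.com/go/bigtable"
    "golang.org/x/net/context"
)

func main() {
    ctx := context.Background()
    // NewAdminClient picks up Application Default Credentials unless overridden via options.
    admin, err := bigtable.NewAdminClient(ctx, "my-project", "my-instance")
    if err != nil {
        log.Fatalf("NewAdminClient: %v", err)
    }
    defer admin.Close()

    // Create a table and a column family, then list what exists.
    if err := admin.CreateTable(ctx, "mytable"); err != nil {
        log.Fatalf("CreateTable: %v", err)
    }
    if err := admin.CreateColumnFamily(ctx, "mytable", "links"); err != nil {
        log.Fatalf("CreateColumnFamily: %v", err)
    }
    tables, err := admin.Tables(ctx)
    if err != nil {
        log.Fatalf("Tables: %v", err)
    }
    log.Printf("tables: %v", tables)
}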

782
vendor/cloud.google.com/go/bigtable/bigtable.go generated vendored Normal file

@@ -0,0 +1,782 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bigtable // import "cloud.google.com/go/bigtable"
import (
"errors"
"fmt"
"io"
"strconv"
"time"
"cloud.google.com/go/bigtable/internal/gax"
btopt "cloud.google.com/go/bigtable/internal/option"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
const prodAddr = "bigtable.googleapis.com:443"
// Client is a client for reading and writing data to tables in an instance.
//
// A Client is safe to use concurrently, except for its Close method.
type Client struct {
conn *grpc.ClientConn
client btpb.BigtableClient
project, instance string
}
// NewClient creates a new Client for a given project and instance.
func NewClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*Client, error) {
o, err := btopt.DefaultClientOptions(prodAddr, Scope, clientUserAgent)
if err != nil {
return nil, err
}
// Default to a small connection pool that can be overridden.
o = append(o, option.WithGRPCConnectionPool(4))
o = append(o, opts...)
conn, err := transport.DialGRPC(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
return &Client{
conn: conn,
client: btpb.NewBigtableClient(conn),
project: project,
instance: instance,
}, nil
}
// Close closes the Client.
func (c *Client) Close() error {
return c.conn.Close()
}
var (
idempotentRetryCodes = []codes.Code{codes.DeadlineExceeded, codes.Unavailable, codes.Aborted, codes.Internal}
isIdempotentRetryCode = make(map[codes.Code]bool)
retryOptions = []gax.CallOption{
gax.WithDelayTimeoutSettings(100*time.Millisecond, 2000*time.Millisecond, 1.2),
gax.WithRetryCodes(idempotentRetryCodes),
}
)
func init() {
for _, code := range idempotentRetryCodes {
isIdempotentRetryCode[code] = true
}
}
func (c *Client) fullTableName(table string) string {
return fmt.Sprintf("projects/%s/instances/%s/tables/%s", c.project, c.instance, table)
}
// A Table refers to a table.
//
// A Table is safe to use concurrently.
type Table struct {
c *Client
table string
// Metadata to be sent with each request.
md metadata.MD
}
// Open opens a table.
func (c *Client) Open(table string) *Table {
return &Table{
c: c,
table: table,
md: metadata.Pairs(resourcePrefixHeader, c.fullTableName(table)),
}
}
// TODO(dsymonds): Read method that returns a sequence of ReadItems.
// ReadRows reads rows from a table. f is called for each row.
// If f returns false, the stream is shut down and ReadRows returns.
// f owns its argument, and f is called serially in order by row key.
//
// By default, the yielded rows will contain all values in all cells.
// Use RowFilter to limit the cells returned.
func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ...ReadOption) error {
ctx = mergeOutgoingMetadata(ctx, t.md)
var prevRowKey string
err := gax.Invoke(ctx, func(ctx context.Context) error {
req := &btpb.ReadRowsRequest{
TableName: t.c.fullTableName(t.table),
Rows: arg.proto(),
}
for _, opt := range opts {
opt.set(req)
}
ctx, cancel := context.WithCancel(ctx) // for aborting the stream
defer cancel()
stream, err := t.c.client.ReadRows(ctx, req)
if err != nil {
return err
}
cr := newChunkReader()
for {
res, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
// Reset arg for next Invoke call.
arg = arg.retainRowsAfter(prevRowKey)
return err
}
for _, cc := range res.Chunks {
row, err := cr.Process(cc)
if err != nil {
// No need to prepare for a retry; this is an unretryable error.
return err
}
if row == nil {
continue
}
prevRowKey = row.Key()
if !f(row) {
// Cancel and drain stream.
cancel()
for {
if _, err := stream.Recv(); err != nil {
// The stream has ended. We don't return an error
// because the caller has intentionally interrupted the scan.
return nil
}
}
}
}
if err := cr.Close(); err != nil {
// No need to prepare for a retry; this is an unretryable error.
return err
}
}
return err
}, retryOptions...)
return err
}
// ReadRow is a convenience implementation of a single-row reader.
// A missing row will return a zero-length map and a nil error.
func (t *Table) ReadRow(ctx context.Context, row string, opts ...ReadOption) (Row, error) {
var r Row
err := t.ReadRows(ctx, SingleRow(row), func(rr Row) bool {
r = rr
return true
}, opts...)
return r, err
}
// decodeFamilyProto adds the cell data from f to the given row.
func decodeFamilyProto(r Row, row string, f *btpb.Family) {
fam := f.Name // does not have colon
for _, col := range f.Columns {
for _, cell := range col.Cells {
ri := ReadItem{
Row: row,
Column: fam + ":" + string(col.Qualifier),
Timestamp: Timestamp(cell.TimestampMicros),
Value: cell.Value,
}
r[fam] = append(r[fam], ri)
}
}
}
// RowSet is a set of rows to be read. It is satisfied by RowList, RowRange and RowRangeList.
type RowSet interface {
proto() *btpb.RowSet
// retainRowsAfter returns a new RowSet that does not include the
// given row key or any row key lexicographically less than it.
retainRowsAfter(lastRowKey string) RowSet
// Valid reports whether this set can cover at least one row.
valid() bool
}
// RowList is a sequence of row keys.
type RowList []string
func (r RowList) proto() *btpb.RowSet {
keys := make([][]byte, len(r))
for i, row := range r {
keys[i] = []byte(row)
}
return &btpb.RowSet{RowKeys: keys}
}
func (r RowList) retainRowsAfter(lastRowKey string) RowSet {
var retryKeys RowList
for _, key := range r {
if key > lastRowKey {
retryKeys = append(retryKeys, key)
}
}
return retryKeys
}
func (r RowList) valid() bool {
return len(r) > 0
}
// A RowRange is a half-open interval [Start, Limit) encompassing
// all the rows with keys at least as large as Start, and less than Limit.
// (Bigtable string comparison is the same as Go's.)
// A RowRange can be unbounded, encompassing all keys at least as large as Start.
type RowRange struct {
start string
limit string
}
// NewRange returns the new RowRange [begin, end).
func NewRange(begin, end string) RowRange {
return RowRange{
start: begin,
limit: end,
}
}
// Unbounded tests whether a RowRange is unbounded.
func (r RowRange) Unbounded() bool {
return r.limit == ""
}
// Contains says whether the RowRange contains the key.
func (r RowRange) Contains(row string) bool {
return r.start <= row && (r.limit == "" || r.limit > row)
}
// String provides a printable description of a RowRange.
func (r RowRange) String() string {
a := strconv.Quote(r.start)
if r.Unbounded() {
return fmt.Sprintf("[%s,∞)", a)
}
return fmt.Sprintf("[%s,%q)", a, r.limit)
}
func (r RowRange) proto() *btpb.RowSet {
rr := &btpb.RowRange{
StartKey: &btpb.RowRange_StartKeyClosed{[]byte(r.start)},
}
if !r.Unbounded() {
rr.EndKey = &btpb.RowRange_EndKeyOpen{[]byte(r.limit)}
}
return &btpb.RowSet{RowRanges: []*btpb.RowRange{rr}}
}
func (r RowRange) retainRowsAfter(lastRowKey string) RowSet {
if lastRowKey == "" || lastRowKey < r.start {
return r
}
// Set the beginning of the range to the row after the last scanned.
start := lastRowKey + "\x00"
if r.Unbounded() {
return InfiniteRange(start)
}
return NewRange(start, r.limit)
}
func (r RowRange) valid() bool {
return r.start < r.limit
}
// RowRangeList is a sequence of RowRanges representing the union of the ranges.
type RowRangeList []RowRange
func (r RowRangeList) proto() *btpb.RowSet {
ranges := make([]*btpb.RowRange, len(r))
for i, rr := range r {
// RowRange.proto() returns a RowSet with a single element RowRange array
ranges[i] = rr.proto().RowRanges[0]
}
return &btpb.RowSet{RowRanges: ranges}
}
func (r RowRangeList) retainRowsAfter(lastRowKey string) RowSet {
if lastRowKey == "" {
return r
}
// Return a list of any range that has not yet been completely processed
var ranges RowRangeList
for _, rr := range r {
retained := rr.retainRowsAfter(lastRowKey)
if retained.valid() {
ranges = append(ranges, retained.(RowRange))
}
}
return ranges
}
func (r RowRangeList) valid() bool {
for _, rr := range r {
if rr.valid() {
return true
}
}
return false
}
// SingleRow returns a RowSet for reading a single row.
func SingleRow(row string) RowSet {
return RowList{row}
}
// PrefixRange returns a RowRange consisting of all keys starting with the prefix.
func PrefixRange(prefix string) RowRange {
return RowRange{
start: prefix,
limit: prefixSuccessor(prefix),
}
}
// InfiniteRange returns the RowRange consisting of all keys at least as
// large as start.
func InfiniteRange(start string) RowRange {
return RowRange{
start: start,
limit: "",
}
}
// prefixSuccessor returns the lexically smallest string greater than the
// prefix, if it exists, or "" otherwise. In either case, it is the string
// needed for the Limit of a RowRange.
func prefixSuccessor(prefix string) string {
if prefix == "" {
return "" // infinite range
}
n := len(prefix)
for n--; n >= 0 && prefix[n] == '\xff'; n-- {
}
if n == -1 {
return ""
}
ans := []byte(prefix[:n])
ans = append(ans, prefix[n]+1)
return string(ans)
}
// A ReadOption is an optional argument to ReadRows.
type ReadOption interface {
set(req *btpb.ReadRowsRequest)
}
// RowFilter returns a ReadOption that applies f to the contents of read rows.
func RowFilter(f Filter) ReadOption { return rowFilter{f} }
type rowFilter struct{ f Filter }
func (rf rowFilter) set(req *btpb.ReadRowsRequest) { req.Filter = rf.f.proto() }
// LimitRows returns a ReadOption that will limit the number of rows to be read.
func LimitRows(limit int64) ReadOption { return limitRows{limit} }
type limitRows struct{ limit int64 }
func (lr limitRows) set(req *btpb.ReadRowsRequest) { req.RowsLimit = lr.limit }
// mutationsAreRetryable returns true if all mutations are idempotent
// and therefore retryable. A mutation is idempotent iff all of its cells
// have an explicit timestamp set and do not rely on the server assigning one.
func mutationsAreRetryable(muts []*btpb.Mutation) bool {
serverTime := int64(ServerTime)
for _, mut := range muts {
setCell := mut.GetSetCell()
if setCell != nil && setCell.TimestampMicros == serverTime {
return false
}
}
return true
}
// Apply applies a Mutation to a specific row.
func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error {
ctx = mergeOutgoingMetadata(ctx, t.md)
after := func(res proto.Message) {
for _, o := range opts {
o.after(res)
}
}
var callOptions []gax.CallOption
if m.cond == nil {
req := &btpb.MutateRowRequest{
TableName: t.c.fullTableName(t.table),
RowKey: []byte(row),
Mutations: m.ops,
}
if mutationsAreRetryable(m.ops) {
callOptions = retryOptions
}
var res *btpb.MutateRowResponse
err := gax.Invoke(ctx, func(ctx context.Context) error {
var err error
res, err = t.c.client.MutateRow(ctx, req)
return err
}, callOptions...)
if err == nil {
after(res)
}
return err
}
req := &btpb.CheckAndMutateRowRequest{
TableName: t.c.fullTableName(t.table),
RowKey: []byte(row),
PredicateFilter: m.cond.proto(),
}
if m.mtrue != nil {
req.TrueMutations = m.mtrue.ops
}
if m.mfalse != nil {
req.FalseMutations = m.mfalse.ops
}
if mutationsAreRetryable(req.TrueMutations) && mutationsAreRetryable(req.FalseMutations) {
callOptions = retryOptions
}
var cmRes *btpb.CheckAndMutateRowResponse
err := gax.Invoke(ctx, func(ctx context.Context) error {
var err error
cmRes, err = t.c.client.CheckAndMutateRow(ctx, req)
return err
}, callOptions...)
if err == nil {
after(cmRes)
}
return err
}
// An ApplyOption is an optional argument to Apply.
type ApplyOption interface {
after(res proto.Message)
}
type applyAfterFunc func(res proto.Message)
func (a applyAfterFunc) after(res proto.Message) { a(res) }
// GetCondMutationResult returns an ApplyOption that reports whether the conditional
// mutation's condition matched.
func GetCondMutationResult(matched *bool) ApplyOption {
return applyAfterFunc(func(res proto.Message) {
if res, ok := res.(*btpb.CheckAndMutateRowResponse); ok {
*matched = res.PredicateMatched
}
})
}
// Mutation represents a set of changes for a single row of a table.
type Mutation struct {
ops []*btpb.Mutation
// for conditional mutations
cond Filter
mtrue, mfalse *Mutation
}
// NewMutation returns a new mutation.
func NewMutation() *Mutation {
return new(Mutation)
}
// NewCondMutation returns a conditional mutation.
// The given row filter determines which mutation is applied:
// If the filter matches any cell in the row, mtrue is applied;
// otherwise, mfalse is applied.
// Either given mutation may be nil.
func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation {
return &Mutation{cond: cond, mtrue: mtrue, mfalse: mfalse}
}
// Set sets a value in a specified column, with the given timestamp.
// The timestamp will be truncated to millisecond granularity.
// A timestamp of ServerTime means to use the server timestamp.
func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) {
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
FamilyName: family,
ColumnQualifier: []byte(column),
TimestampMicros: int64(ts.TruncateToMilliseconds()),
Value: value,
}}})
}
// DeleteCellsInColumn will delete all the cells whose columns are family:column.
func (m *Mutation) DeleteCellsInColumn(family, column string) {
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{
FamilyName: family,
ColumnQualifier: []byte(column),
}}})
}
// DeleteTimestampRange deletes all cells whose columns are family:column
// and whose timestamps are in the half-open interval [start, end).
// If end is zero, it will be interpreted as infinity.
// The timestamps will be truncated to millisecond granularity.
func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) {
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{
FamilyName: family,
ColumnQualifier: []byte(column),
TimeRange: &btpb.TimestampRange{
StartTimestampMicros: int64(start.TruncateToMilliseconds()),
EndTimestampMicros: int64(end.TruncateToMilliseconds()),
},
}}})
}
// DeleteCellsInFamily will delete all the cells whose columns are family:*.
func (m *Mutation) DeleteCellsInFamily(family string) {
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{&btpb.Mutation_DeleteFromFamily{
FamilyName: family,
}}})
}
// DeleteRow deletes the entire row.
func (m *Mutation) DeleteRow() {
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{&btpb.Mutation_DeleteFromRow{}}})
}
// entryErr is a container that combines an entry with the error that was returned for it.
// Err may be nil if no error was returned for the Entry, or if the Entry has not yet been processed.
type entryErr struct {
Entry *btpb.MutateRowsRequest_Entry
Err error
}
// ApplyBulk applies multiple Mutations.
// Each mutation is individually applied atomically,
// but the set of mutations may be applied in any order.
//
// Two types of failures may occur. If the entire process
// fails, (nil, err) will be returned. If specific mutations
// fail to apply, ([]err, nil) will be returned, and the errors
// will correspond to the relevant rowKeys/muts arguments.
//
// Conditional mutations cannot be applied in bulk and providing one will result in an error.
func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutation, opts ...ApplyOption) ([]error, error) {
ctx = mergeOutgoingMetadata(ctx, t.md)
if len(rowKeys) != len(muts) {
return nil, fmt.Errorf("mismatched rowKeys and mutation array lengths: %d, %d", len(rowKeys), len(muts))
}
origEntries := make([]*entryErr, len(rowKeys))
for i, key := range rowKeys {
mut := muts[i]
if mut.cond != nil {
return nil, errors.New("conditional mutations cannot be applied in bulk")
}
origEntries[i] = &entryErr{Entry: &btpb.MutateRowsRequest_Entry{RowKey: []byte(key), Mutations: mut.ops}}
}
// entries will be reduced after each invocation to just what needs to be retried.
entries := make([]*entryErr, len(rowKeys))
copy(entries, origEntries)
err := gax.Invoke(ctx, func(ctx context.Context) error {
err := t.doApplyBulk(ctx, entries, opts...)
if err != nil {
// We want to retry the entire request with the current entries
return err
}
entries = t.getApplyBulkRetries(entries)
if len(entries) > 0 && len(idempotentRetryCodes) > 0 {
// We have at least one mutation that needs to be retried.
// Return an arbitrary error that is retryable according to callOptions.
return grpc.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk")
}
return nil
}, retryOptions...)
if err != nil {
return nil, err
}
// Accumulate all of the errors into an array to return, interspersed with nils for successful
// entries. The absence of any errors means we should return nil.
var errs []error
var foundErr bool
for _, entry := range origEntries {
if entry.Err != nil {
foundErr = true
}
errs = append(errs, entry.Err)
}
if foundErr {
return errs, nil
}
return nil, nil
}
// getApplyBulkRetries returns the entries that need to be retried
func (t *Table) getApplyBulkRetries(entries []*entryErr) []*entryErr {
var retryEntries []*entryErr
for _, entry := range entries {
err := entry.Err
if err != nil && isIdempotentRetryCode[grpc.Code(err)] && mutationsAreRetryable(entry.Entry.Mutations) {
// There was an error and the entry is retryable.
retryEntries = append(retryEntries, entry)
}
}
return retryEntries
}
// doApplyBulk does the work of a single ApplyBulk invocation
func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...ApplyOption) error {
after := func(res proto.Message) {
for _, o := range opts {
o.after(res)
}
}
entries := make([]*btpb.MutateRowsRequest_Entry, len(entryErrs))
for i, entryErr := range entryErrs {
entries[i] = entryErr.Entry
}
req := &btpb.MutateRowsRequest{
TableName: t.c.fullTableName(t.table),
Entries: entries,
}
stream, err := t.c.client.MutateRows(ctx, req)
if err != nil {
return err
}
for {
res, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
return err
}
for i, entry := range res.Entries {
status := entry.Status
if status.Code == int32(codes.OK) {
entryErrs[i].Err = nil
} else {
entryErrs[i].Err = grpc.Errorf(codes.Code(status.Code), status.Message)
}
}
after(res)
}
return nil
}
// Timestamp is in units of microseconds since 1 January 1970.
type Timestamp int64
// ServerTime is a specific Timestamp that may be passed to (*Mutation).Set.
// It indicates that the server's timestamp should be used.
const ServerTime Timestamp = -1
// Time converts a time.Time into a Timestamp.
func Time(t time.Time) Timestamp { return Timestamp(t.UnixNano() / 1e3) }
// Now returns the Timestamp representation of the current time on the client.
func Now() Timestamp { return Time(time.Now()) }
// Time converts a Timestamp into a time.Time.
func (ts Timestamp) Time() time.Time { return time.Unix(0, int64(ts)*1e3) }
// TruncateToMilliseconds truncates a Timestamp to millisecond granularity,
// which is currently the only granularity supported.
func (ts Timestamp) TruncateToMilliseconds() Timestamp {
if ts == ServerTime {
return ts
}
return ts - ts%1000
}
// ApplyReadModifyWrite applies a ReadModifyWrite to a specific row.
// It returns the newly written cells.
func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) {
ctx = mergeOutgoingMetadata(ctx, t.md)
req := &btpb.ReadModifyWriteRowRequest{
TableName: t.c.fullTableName(t.table),
RowKey: []byte(row),
Rules: m.ops,
}
res, err := t.c.client.ReadModifyWriteRow(ctx, req)
if err != nil {
return nil, err
}
if res.Row == nil {
return nil, errors.New("unable to apply ReadModifyWrite: res.Row=nil")
}
r := make(Row)
for _, fam := range res.Row.Families { // res is *btpb.Row, fam is *btpb.Family
decodeFamilyProto(r, row, fam)
}
return r, nil
}
// ReadModifyWrite represents a set of operations on a single row of a table.
// It is like Mutation but for non-idempotent changes.
// When applied, these operations operate on the latest values of the row's cells,
// and result in a new value being written to the relevant cell with a timestamp
// that is max(existing timestamp, current server time).
//
// The application of a ReadModifyWrite is atomic; concurrent ReadModifyWrites will
// be executed serially by the server.
type ReadModifyWrite struct {
ops []*btpb.ReadModifyWriteRule
}
// NewReadModifyWrite returns a new ReadModifyWrite.
func NewReadModifyWrite() *ReadModifyWrite { return new(ReadModifyWrite) }
// AppendValue appends a value to a specific cell's value.
// If the cell is unset, it will be treated as an empty value.
func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) {
m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
FamilyName: family,
ColumnQualifier: []byte(column),
Rule: &btpb.ReadModifyWriteRule_AppendValue{v},
})
}
// Increment interprets the value in a specific cell as a 64-bit big-endian signed integer,
// and adds a value to it. If the cell is unset, it will be treated as zero.
// If the cell is set and is not an 8-byte value, the entire ApplyReadModifyWrite
// operation will fail.
func (m *ReadModifyWrite) Increment(family, column string, delta int64) {
m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
FamilyName: family,
ColumnQualifier: []byte(column),
Rule: &btpb.ReadModifyWriteRule_IncrementAmount{delta},
})
}
// mergeOutgoingMetadata returns a context populated by the existing outgoing metadata,
// if any, joined with internal metadata.
func mergeOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context {
mdCopy, _ := metadata.FromOutgoingContext(ctx)
return metadata.NewOutgoingContext(ctx, metadata.Join(mdCopy, md))
}
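
The package documentation in doc.go (below) covers ReadRows and single-row Apply; as a complementary sketch, here is ApplyBulk built on the types defined above. Names and row data are placeholders.

package main

import (
    "log"

    "cloud.google.com/go/bigtable"
    "golang.org/x/net/context"
)

func main() {
    ctx := context.Background()
    client, err := bigtable.NewClient(ctx, "my-project", "my-instance")
    if err != nil {
        log.Fatalf("NewClient: %v", err)
    }
    defer client.Close()
    tbl := client.Open("mytable")

    // One mutation per row key; each is applied atomically, but in no particular order.
    rowKeys := []string{"com.google.cloud", "com.google.maps"}
    muts := make([]*bigtable.Mutation, len(rowKeys))
    for i := range muts {
        mut := bigtable.NewMutation()
        mut.Set("links", "golang.org", bigtable.Now(), []byte("1"))
        muts[i] = mut
    }

    errs, err := tbl.ApplyBulk(ctx, rowKeys, muts)
    if err != nil {
        log.Fatalf("ApplyBulk: %v", err) // the whole request failed
    }
    for i, e := range errs { // errs is nil when every entry succeeded
        if e != nil {
            log.Printf("row %q failed: %v", rowKeys[i], e)
        }
    }
}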

125
vendor/cloud.google.com/go/bigtable/doc.go generated vendored Normal file

@@ -0,0 +1,125 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package bigtable is an API to Google Cloud Bigtable.
See https://cloud.google.com/bigtable/docs/ for general product documentation.
Setup and Credentials
Use NewClient or NewAdminClient to create a client that can be used to access
the data or admin APIs respectively. Both require credentials that have permission
to access the Cloud Bigtable API.
If your program is run on Google App Engine or Google Compute Engine, using the Application Default Credentials
(https://developers.google.com/accounts/docs/application-default-credentials)
is the simplest option. Those credentials will be used by default when NewClient or NewAdminClient are called.
To use alternate credentials, pass them to NewClient or NewAdminClient using option.WithTokenSource.
For instance, you can use service account credentials by visiting
https://cloud.google.com/console/project/MYPROJECT/apiui/credential,
creating a new OAuth "Client ID", storing the JSON key somewhere accessible, and writing
jsonKey, err := ioutil.ReadFile(pathToKeyFile)
...
config, err := google.JWTConfigFromJSON(jsonKey, bigtable.Scope) // or bigtable.AdminScope, etc.
...
client, err := bigtable.NewClient(ctx, project, instance, option.WithTokenSource(config.TokenSource(ctx)))
...
Here, `google` means the golang.org/x/oauth2/google package
and `option` means the google.golang.org/api/option package.
Reading
The principal way to read from a Bigtable is to use the ReadRows method on *Table.
A RowRange specifies a contiguous portion of a table. A Filter may be provided through
RowFilter to limit or transform the data that is returned.
tbl := client.Open("mytable")
...
// Read all the rows starting with "com.google.",
// but only fetch the columns in the "links" family.
rr := bigtable.PrefixRange("com.google.")
err := tbl.ReadRows(ctx, rr, func(r Row) bool {
// do something with r
return true // keep going
}, bigtable.RowFilter(bigtable.FamilyFilter("links")))
...
To read a single row, use the ReadRow helper method.
r, err := tbl.ReadRow(ctx, "com.google.cloud") // "com.google.cloud" is the entire row key
...
Writing
This API exposes two distinct forms of writing to a Bigtable: a Mutation and a ReadModifyWrite.
The former expresses idempotent operations.
The latter expresses non-idempotent operations and returns the new values of updated cells.
These operations are performed by creating a Mutation or ReadModifyWrite (with NewMutation or NewReadModifyWrite),
building up one or more operations on that, and then using the Apply or ApplyReadModifyWrite
methods on a Table.
For instance, to set a couple of cells in a table,
tbl := client.Open("mytable")
mut := bigtable.NewMutation()
mut.Set("links", "maps.google.com", bigtable.Now(), []byte("1"))
mut.Set("links", "golang.org", bigtable.Now(), []byte("1"))
err := tbl.Apply(ctx, "com.google.cloud", mut)
...
To increment an encoded value in one cell,
tbl := client.Open("mytable")
rmw := bigtable.NewReadModifyWrite()
rmw.Increment("links", "golang.org", 12) // add 12 to the cell in column "links:golang.org"
r, err := tbl.ApplyReadModifyWrite(ctx, "com.google.cloud", rmw)
...
Retries
If a read or write operation encounters a transient error it will be retried until a successful
response, an unretryable error or the context deadline is reached. Non-idempotent writes (where
the timestamp is set to ServerTime) will not be retried. In the case of ReadRows, retried calls
will not re-scan rows that have already been processed.
Authentication
See examples of authorization and authentication at
https://godoc.org/cloud.google.com/go#pkg-examples.
*/
package bigtable // import "cloud.google.com/go/bigtable"
// Scope constants for authentication credentials.
// These should be used when using credential creation functions such as oauth.NewServiceAccountFromFile.
const (
// Scope is the OAuth scope for Cloud Bigtable data operations.
Scope = "https://www.googleapis.com/auth/bigtable.data"
// ReadonlyScope is the OAuth scope for Cloud Bigtable read-only data operations.
ReadonlyScope = "https://www.googleapis.com/auth/bigtable.readonly"
// AdminScope is the OAuth scope for Cloud Bigtable table admin operations.
AdminScope = "https://www.googleapis.com/auth/bigtable.admin.table"
// InstanceAdminScope is the OAuth scope for Cloud Bigtable instance (and cluster) admin operations.
InstanceAdminScope = "https://www.googleapis.com/auth/bigtable.admin.cluster"
)
// clientUserAgent identifies the version of this package.
// It should be bumped upon significant changes only.
const clientUserAgent = "cbt-go/20160628"
// resourcePrefixHeader is the name of the metadata header used to indicate
// the resource being operated on.
const resourcePrefixHeader = "google-cloud-resource-prefix"
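
One detail of the Retries section above is easy to miss: only writes with an explicit timestamp are idempotent and therefore retried. A small sketch of the distinction, with placeholder family and column names:

package example

import "cloud.google.com/go/bigtable"

// retryableVsNot builds two otherwise identical writes; only the first is
// idempotent, so only it is eligible for the automatic retries described above.
func retryableVsNot() (retryable, notRetryable *bigtable.Mutation) {
    retryable = bigtable.NewMutation()
    retryable.Set("links", "golang.org", bigtable.Now(), []byte("1")) // explicit client timestamp

    notRetryable = bigtable.NewMutation()
    notRetryable.Set("links", "golang.org", bigtable.ServerTime, []byte("1")) // server-assigned timestamp
    return retryable, notRetryable
}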

288
vendor/cloud.google.com/go/bigtable/filter.go generated vendored Normal file

@@ -0,0 +1,288 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bigtable
import (
"fmt"
"strings"
"time"
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
)
// A Filter represents a row filter.
type Filter interface {
String() string
proto() *btpb.RowFilter
}
// ChainFilters returns a filter that applies a sequence of filters.
func ChainFilters(sub ...Filter) Filter { return chainFilter{sub} }
type chainFilter struct {
sub []Filter
}
func (cf chainFilter) String() string {
var ss []string
for _, sf := range cf.sub {
ss = append(ss, sf.String())
}
return "(" + strings.Join(ss, " | ") + ")"
}
func (cf chainFilter) proto() *btpb.RowFilter {
chain := &btpb.RowFilter_Chain{}
for _, sf := range cf.sub {
chain.Filters = append(chain.Filters, sf.proto())
}
return &btpb.RowFilter{
Filter: &btpb.RowFilter_Chain_{chain},
}
}
// InterleaveFilters returns a filter that applies a set of filters in parallel
// and interleaves the results.
func InterleaveFilters(sub ...Filter) Filter { return interleaveFilter{sub} }
type interleaveFilter struct {
sub []Filter
}
func (ilf interleaveFilter) String() string {
var ss []string
for _, sf := range ilf.sub {
ss = append(ss, sf.String())
}
return "(" + strings.Join(ss, " + ") + ")"
}
func (ilf interleaveFilter) proto() *btpb.RowFilter {
inter := &btpb.RowFilter_Interleave{}
for _, sf := range ilf.sub {
inter.Filters = append(inter.Filters, sf.proto())
}
return &btpb.RowFilter{
Filter: &btpb.RowFilter_Interleave_{inter},
}
}
// RowKeyFilter returns a filter that matches cells from rows whose
// key matches the provided RE2 pattern.
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
func RowKeyFilter(pattern string) Filter { return rowKeyFilter(pattern) }
type rowKeyFilter string
func (rkf rowKeyFilter) String() string { return fmt.Sprintf("row(%s)", string(rkf)) }
func (rkf rowKeyFilter) proto() *btpb.RowFilter {
return &btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{[]byte(rkf)}}
}
// FamilyFilter returns a filter that matches cells whose family name
// matches the provided RE2 pattern.
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
func FamilyFilter(pattern string) Filter { return familyFilter(pattern) }
type familyFilter string
func (ff familyFilter) String() string { return fmt.Sprintf("col(%s:)", string(ff)) }
func (ff familyFilter) proto() *btpb.RowFilter {
return &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{string(ff)}}
}
// ColumnFilter returns a filter that matches cells whose column name
// matches the provided RE2 pattern.
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
func ColumnFilter(pattern string) Filter { return columnFilter(pattern) }
type columnFilter string
func (cf columnFilter) String() string { return fmt.Sprintf("col(.*:%s)", string(cf)) }
func (cf columnFilter) proto() *btpb.RowFilter {
return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte(cf)}}
}
// ValueFilter returns a filter that matches cells whose value
// matches the provided RE2 pattern.
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
func ValueFilter(pattern string) Filter { return valueFilter(pattern) }
type valueFilter string
func (vf valueFilter) String() string { return fmt.Sprintf("value_match(%s)", string(vf)) }
func (vf valueFilter) proto() *btpb.RowFilter {
return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{[]byte(vf)}}
}
// LatestNFilter returns a filter that matches the most recent N cells in each column.
func LatestNFilter(n int) Filter { return latestNFilter(n) }
type latestNFilter int32
func (lnf latestNFilter) String() string { return fmt.Sprintf("col(*,%d)", lnf) }
func (lnf latestNFilter) proto() *btpb.RowFilter {
return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerColumnLimitFilter{int32(lnf)}}
}
// StripValueFilter returns a filter that replaces each value with the empty string.
func StripValueFilter() Filter { return stripValueFilter{} }
type stripValueFilter struct{}
func (stripValueFilter) String() string { return "strip_value()" }
func (stripValueFilter) proto() *btpb.RowFilter {
return &btpb.RowFilter{Filter: &btpb.RowFilter_StripValueTransformer{true}}
}
// TimestampRangeFilter returns a filter that matches any cells whose timestamp is within the given time bounds. A zero
// time means no bound.
// The timestamp will be truncated to millisecond granularity.
func TimestampRangeFilter(startTime time.Time, endTime time.Time) Filter {
trf := timestampRangeFilter{}
if !startTime.IsZero() {
trf.startTime = Time(startTime)
}
if !endTime.IsZero() {
trf.endTime = Time(endTime)
}
return trf
}
// TimestampRangeFilterMicros returns a filter that matches any cells whose timestamp is within the given time bounds,
// specified in units of microseconds since 1 January 1970. A zero value for the end time is interpreted as no bound.
// The timestamp will be truncated to millisecond granularity.
func TimestampRangeFilterMicros(startTime Timestamp, endTime Timestamp) Filter {
return timestampRangeFilter{startTime, endTime}
}
type timestampRangeFilter struct {
startTime Timestamp
endTime Timestamp
}
func (trf timestampRangeFilter) String() string {
return fmt.Sprintf("timestamp_range(%s,%s)", trf.startTime, trf.endTime)
}
func (trf timestampRangeFilter) proto() *btpb.RowFilter {
return &btpb.RowFilter{
Filter: &btpb.RowFilter_TimestampRangeFilter{
&btpb.TimestampRange{
int64(trf.startTime.TruncateToMilliseconds()),
int64(trf.endTime.TruncateToMilliseconds()),
},
}}
}
// ColumnRangeFilter returns a filter that matches a contiguous range of columns within a single
// family, as specified by an inclusive start qualifier and exclusive end qualifier.
func ColumnRangeFilter(family, start, end string) Filter {
return columnRangeFilter{family, start, end}
}
type columnRangeFilter struct {
family string
start string
end string
}
func (crf columnRangeFilter) String() string {
return fmt.Sprintf("columnRangeFilter(%s,%s,%s)", crf.family, crf.start, crf.end)
}
func (crf columnRangeFilter) proto() *btpb.RowFilter {
r := &btpb.ColumnRange{FamilyName: crf.family}
if crf.start != "" {
r.StartQualifier = &btpb.ColumnRange_StartQualifierClosed{[]byte(crf.start)}
}
if crf.end != "" {
r.EndQualifier = &btpb.ColumnRange_EndQualifierOpen{[]byte(crf.end)}
}
return &btpb.RowFilter{&btpb.RowFilter_ColumnRangeFilter{r}}
}
// ValueRangeFilter returns a filter that matches cells with values that fall within
// the given range, as specified by an inclusive start value and exclusive end value.
func ValueRangeFilter(start, end []byte) Filter {
return valueRangeFilter{start, end}
}
type valueRangeFilter struct {
start []byte
end []byte
}
func (vrf valueRangeFilter) String() string {
return fmt.Sprintf("valueRangeFilter(%s,%s)", vrf.start, vrf.end)
}
func (vrf valueRangeFilter) proto() *btpb.RowFilter {
r := &btpb.ValueRange{}
if vrf.start != nil {
r.StartValue = &btpb.ValueRange_StartValueClosed{vrf.start}
}
if vrf.end != nil {
r.EndValue = &btpb.ValueRange_EndValueOpen{vrf.end}
}
return &btpb.RowFilter{&btpb.RowFilter_ValueRangeFilter{r}}
}
// ConditionFilter returns a filter that evaluates to one of two possible filters depending
// on whether or not the given predicate filter matches at least one cell.
// If the matched filter is nil then no results will be returned.
// IMPORTANT NOTE: The predicate filter does not execute atomically with the
// true and false filters, which may lead to inconsistent or unexpected
// results. Additionally, condition filters have poor performance, especially
// when filters are set for the false condition.
func ConditionFilter(predicateFilter, trueFilter, falseFilter Filter) Filter {
return conditionFilter{predicateFilter, trueFilter, falseFilter}
}
type conditionFilter struct {
predicateFilter Filter
trueFilter Filter
falseFilter Filter
}
func (cf conditionFilter) String() string {
return fmt.Sprintf("conditionFilter(%s,%s,%s)", cf.predicateFilter, cf.trueFilter, cf.falseFilter)
}
func (cf conditionFilter) proto() *btpb.RowFilter {
var tf *btpb.RowFilter
var ff *btpb.RowFilter
if cf.trueFilter != nil {
tf = cf.trueFilter.proto()
}
if cf.falseFilter != nil {
ff = cf.falseFilter.proto()
}
return &btpb.RowFilter{
&btpb.RowFilter_Condition_{&btpb.RowFilter_Condition{
cf.predicateFilter.proto(),
tf,
ff,
}}}
}
// TODO(dsymonds): More filters: sampling
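
A sketch of how these filters compose in practice, assuming a *bigtable.Table opened as in bigtable.go above; the table, family, and prefix values are placeholders.

package example

import (
    "log"

    "cloud.google.com/go/bigtable"
    "golang.org/x/net/context"
)

// readLinkColumns reads the newest cell of every column in the "links" family
// for rows starting with "com.google.", stripping the values since only the
// column names are of interest.
func readLinkColumns(ctx context.Context, tbl *bigtable.Table) error {
    filter := bigtable.ChainFilters(
        bigtable.FamilyFilter("links"), // keep only the links family
        bigtable.LatestNFilter(1),      // newest cell per column
        bigtable.StripValueFilter(),    // drop cell values
    )
    return tbl.ReadRows(ctx, bigtable.PrefixRange("com.google."),
        func(r bigtable.Row) bool {
            for _, item := range r["links"] {
                log.Println(item.Column)
            }
            return true // keep scanning
        },
        bigtable.RowFilter(filter))
}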

158
vendor/cloud.google.com/go/bigtable/gc.go generated vendored Normal file

@@ -0,0 +1,158 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bigtable
import (
"fmt"
"strings"
"time"
durpb "github.com/golang/protobuf/ptypes/duration"
bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
)
// A GCPolicy represents a rule that determines which cells are eligible for garbage collection.
type GCPolicy interface {
String() string
proto() *bttdpb.GcRule
}
// IntersectionPolicy returns a GC policy that only applies when all its sub-policies apply.
func IntersectionPolicy(sub ...GCPolicy) GCPolicy { return intersectionPolicy{sub} }
type intersectionPolicy struct {
sub []GCPolicy
}
func (ip intersectionPolicy) String() string {
var ss []string
for _, sp := range ip.sub {
ss = append(ss, sp.String())
}
return "(" + strings.Join(ss, " && ") + ")"
}
func (ip intersectionPolicy) proto() *bttdpb.GcRule {
inter := &bttdpb.GcRule_Intersection{}
for _, sp := range ip.sub {
inter.Rules = append(inter.Rules, sp.proto())
}
return &bttdpb.GcRule{
Rule: &bttdpb.GcRule_Intersection_{inter},
}
}
// UnionPolicy returns a GC policy that applies when any of its sub-policies apply.
func UnionPolicy(sub ...GCPolicy) GCPolicy { return unionPolicy{sub} }
type unionPolicy struct {
sub []GCPolicy
}
func (up unionPolicy) String() string {
var ss []string
for _, sp := range up.sub {
ss = append(ss, sp.String())
}
return "(" + strings.Join(ss, " || ") + ")"
}
func (up unionPolicy) proto() *bttdpb.GcRule {
union := &bttdpb.GcRule_Union{}
for _, sp := range up.sub {
union.Rules = append(union.Rules, sp.proto())
}
return &bttdpb.GcRule{
Rule: &bttdpb.GcRule_Union_{union},
}
}
// MaxVersionsPolicy returns a GC policy that applies to all versions of a cell
// except for the most recent n.
func MaxVersionsPolicy(n int) GCPolicy { return maxVersionsPolicy(n) }
type maxVersionsPolicy int
func (mvp maxVersionsPolicy) String() string { return fmt.Sprintf("versions() > %d", int(mvp)) }
func (mvp maxVersionsPolicy) proto() *bttdpb.GcRule {
return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{int32(mvp)}}
}
// MaxAgePolicy returns a GC policy that applies to all cells
// older than the given age.
func MaxAgePolicy(d time.Duration) GCPolicy { return maxAgePolicy(d) }
type maxAgePolicy time.Duration
var units = []struct {
d time.Duration
suffix string
}{
{24 * time.Hour, "d"},
{time.Hour, "h"},
{time.Minute, "m"},
}
func (ma maxAgePolicy) String() string {
d := time.Duration(ma)
for _, u := range units {
if d%u.d == 0 {
return fmt.Sprintf("age() > %d%s", d/u.d, u.suffix)
}
}
return fmt.Sprintf("age() > %d", d/time.Microsecond)
}
func (ma maxAgePolicy) proto() *bttdpb.GcRule {
// This doesn't handle overflows, etc.
// Fix this if people care about GC policies over 290 years.
ns := time.Duration(ma).Nanoseconds()
return &bttdpb.GcRule{
Rule: &bttdpb.GcRule_MaxAge{&durpb.Duration{
Seconds: ns / 1e9,
Nanos: int32(ns % 1e9),
}},
}
}
// GCRuleToString converts the given GcRule proto to a user-visible string.
func GCRuleToString(rule *bttdpb.GcRule) string {
if rule == nil {
return "<default>"
}
var ruleStr string
if r, ok := rule.Rule.(*bttdpb.GcRule_MaxNumVersions); ok {
ruleStr += MaxVersionsPolicy(int(r.MaxNumVersions)).String()
} else if r, ok := rule.Rule.(*bttdpb.GcRule_MaxAge); ok {
ruleStr += MaxAgePolicy(time.Duration(r.MaxAge.Seconds) * time.Second).String()
} else if r, ok := rule.Rule.(*bttdpb.GcRule_Intersection_); ok {
var chunks []string
for _, intRule := range r.Intersection.Rules {
chunks = append(chunks, GCRuleToString(intRule))
}
ruleStr += "(" + strings.Join(chunks, " && ") + ")"
} else if r, ok := rule.Rule.(*bttdpb.GcRule_Union_); ok {
var chunks []string
for _, unionRule := range r.Union.Rules {
chunks = append(chunks, GCRuleToString(unionRule))
}
ruleStr += "(" + strings.Join(chunks, " || ") + ")"
}
return ruleStr
}
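
For context, a sketch of applying a composed policy through AdminClient.SetGCPolicy from admin.go above; the table and family names are placeholders.

package example

import (
    "time"

    "cloud.google.com/go/bigtable"
    "golang.org/x/net/context"
)

// applyGC marks a cell eligible for garbage collection once it falls outside
// the newest five versions or is older than 30 days (UnionPolicy: either rule suffices).
func applyGC(ctx context.Context, admin *bigtable.AdminClient) error {
    policy := bigtable.UnionPolicy(
        bigtable.MaxVersionsPolicy(5),
        bigtable.MaxAgePolicy(30*24*time.Hour),
    )
    return admin.SetGCPolicy(ctx, "mytable", "links", policy)
}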


@@ -0,0 +1,106 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This is a snapshot from github.com/googleapis/gax-go with minor modifications.
package gax
import (
"time"
"google.golang.org/grpc/codes"
)
type CallOption interface {
Resolve(*CallSettings)
}
type callOptions []CallOption
func (opts callOptions) Resolve(s *CallSettings) *CallSettings {
for _, opt := range opts {
opt.Resolve(s)
}
return s
}
// Encapsulates the call settings for a particular API call.
type CallSettings struct {
Timeout time.Duration
RetrySettings RetrySettings
}
// Per-call configurable settings for retrying upon transient failure.
type RetrySettings struct {
RetryCodes map[codes.Code]bool
BackoffSettings BackoffSettings
}
// Parameters to the exponential backoff algorithm for retrying.
type BackoffSettings struct {
DelayTimeoutSettings MultipliableDuration
RPCTimeoutSettings MultipliableDuration
}
type MultipliableDuration struct {
Initial time.Duration
Max time.Duration
Multiplier float64
}
func (w CallSettings) Resolve(s *CallSettings) {
s.Timeout = w.Timeout
s.RetrySettings = w.RetrySettings
s.RetrySettings.RetryCodes = make(map[codes.Code]bool, len(w.RetrySettings.RetryCodes))
for key, value := range w.RetrySettings.RetryCodes {
s.RetrySettings.RetryCodes[key] = value
}
}
type withRetryCodes []codes.Code
func (w withRetryCodes) Resolve(s *CallSettings) {
s.RetrySettings.RetryCodes = make(map[codes.Code]bool)
for _, code := range w {
s.RetrySettings.RetryCodes[code] = true
}
}
// WithRetryCodes sets a list of Google API canonical error codes upon which a
// retry should be attempted.
func WithRetryCodes(retryCodes []codes.Code) CallOption {
return withRetryCodes(retryCodes)
}
type withDelayTimeoutSettings MultipliableDuration
func (w withDelayTimeoutSettings) Resolve(s *CallSettings) {
s.RetrySettings.BackoffSettings.DelayTimeoutSettings = MultipliableDuration(w)
}
// WithDelayTimeoutSettings specifies:
// - The initial delay time, in milliseconds, between the completion of
// the first failed request and the initiation of the first retrying
// request.
// - The multiplier by which to increase the delay time between the
// completion of failed requests, and the initiation of the subsequent
// retrying request.
// - The maximum delay time, in milliseconds, between requests. When this
// value is reached, `RetryDelayMultiplier` will no longer be used to
// increase delay time.
func WithDelayTimeoutSettings(initial time.Duration, max time.Duration, multiplier float64) CallOption {
return withDelayTimeoutSettings(MultipliableDuration{initial, max, multiplier})
}


@@ -0,0 +1,84 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This is a snapshot from github.com/googleapis/gax-go with minor modifications.
package gax
import (
"math/rand"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"log"
"os"
)
var logger *log.Logger = log.New(os.Stderr, "", log.LstdFlags)
// APICall is a user-defined call stub.
type APICall func(context.Context) error
// scaleDuration returns the product of a and mult.
func scaleDuration(a time.Duration, mult float64) time.Duration {
ns := float64(a) * mult
return time.Duration(ns)
}
// invokeWithRetry calls stub using an exponential backoff retry mechanism
// based on the values provided in callSettings.
func invokeWithRetry(ctx context.Context, stub APICall, callSettings CallSettings) error {
retrySettings := callSettings.RetrySettings
backoffSettings := callSettings.RetrySettings.BackoffSettings
delay := backoffSettings.DelayTimeoutSettings.Initial
for {
// If the deadline is exceeded...
if ctx.Err() != nil {
return ctx.Err()
}
err := stub(ctx)
code := grpc.Code(err)
if code == codes.OK {
return nil
}
if !retrySettings.RetryCodes[code] {
return err
}
// Sleep a random amount up to the current delay
d := time.Duration(rand.Int63n(int64(delay)))
delayCtx, _ := context.WithTimeout(ctx, delay)
logger.Printf("Retryable error: %v, retrying in %v", err, d)
<-delayCtx.Done()
delay = scaleDuration(delay, backoffSettings.DelayTimeoutSettings.Multiplier)
if delay > backoffSettings.DelayTimeoutSettings.Max {
delay = backoffSettings.DelayTimeoutSettings.Max
}
}
}
// Invoke calls stub with a child of context modified by the specified options.
func Invoke(ctx context.Context, stub APICall, opts ...CallOption) error {
settings := &CallSettings{}
callOptions(opts).Resolve(settings)
if len(settings.RetrySettings.RetryCodes) > 0 {
return invokeWithRetry(ctx, stub, *settings)
}
return stub(ctx)
}
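
Invoke only retries when at least one retry code has been supplied, and the retry loop sleeps a random duration up to the current delay, so the delay settings must be non-zero whenever retry codes are set. A same-package sketch (editorial addition, not part of the vendored file) of a typical call site:

// Editorial sketch: wrapping a unary RPC in an APICall and retrying it on
// transient errors. doRPC stands in for any closure that issues the gRPC call.
package gax

import (
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc/codes"
)

func exampleInvoke(ctx context.Context, doRPC APICall) error {
	return Invoke(ctx, doRPC,
		WithRetryCodes([]codes.Code{codes.Unavailable, codes.DeadlineExceeded}),
		// Non-zero delay settings are required once retry codes are set, since
		// invokeWithRetry sleeps a random amount up to the current delay.
		WithDelayTimeoutSettings(100*time.Millisecond, 30*time.Second, 1.3),
	)
}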


@ -0,0 +1,48 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package option contains common code for dealing with client options.
package option
import (
"fmt"
"os"
"google.golang.org/api/option"
"google.golang.org/grpc"
)
// DefaultClientOptions returns the default client options to use for the
// client's gRPC connection.
func DefaultClientOptions(endpoint, scope, userAgent string) ([]option.ClientOption, error) {
var o []option.ClientOption
// Check the environment variables for the bigtable emulator.
// Dial it directly and don't pass any credentials.
if addr := os.Getenv("BIGTABLE_EMULATOR_HOST"); addr != "" {
conn, err := grpc.Dial(addr, grpc.WithInsecure())
if err != nil {
return nil, fmt.Errorf("emulator grpc.Dial: %v", err)
}
o = []option.ClientOption{option.WithGRPCConn(conn)}
} else {
o = []option.ClientOption{
option.WithEndpoint(endpoint),
option.WithScopes(scope),
option.WithUserAgent(userAgent),
}
}
return o, nil
}
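
A short same-package sketch (editorial addition; the address, endpoint, scope, and user agent below are illustrative only) of the emulator branch: when BIGTABLE_EMULATOR_HOST is set, the other arguments are ignored and the returned options carry an insecure connection to the emulator.

// Editorial sketch: exercising the emulator branch of DefaultClientOptions.
package option

import (
	"log"
	"os"

	"google.golang.org/api/option"
)

func exampleEmulatorOptions() []option.ClientOption {
	// Illustrative local address; the env var name is the one checked above.
	os.Setenv("BIGTABLE_EMULATOR_HOST", "localhost:8086")
	opts, err := DefaultClientOptions(
		"bigtable.googleapis.com:443",                   // ignored when the emulator is set
		"https://www.googleapis.com/auth/bigtable.data", // ignored when the emulator is set
		"example-user-agent",                            // ignored when the emulator is set
	)
	if err != nil {
		log.Fatal(err)
	}
	return opts // a single option.WithGRPCConn(...) aimed at the emulator
}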

vendor/cloud.google.com/go/bigtable/reader.go generated vendored Normal file

@ -0,0 +1,250 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bigtable
import (
"bytes"
"fmt"
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
)
// A Row is returned by ReadRows. The map is keyed by column family (the prefix
// of the column name before the colon). The values are the returned ReadItems
// for that column family in the order returned by Read.
type Row map[string][]ReadItem
// Key returns the row's key, or "" if the row is empty.
func (r Row) Key() string {
for _, items := range r {
if len(items) > 0 {
return items[0].Row
}
}
return ""
}
// A ReadItem is returned by Read. A ReadItem contains data from a specific row and column.
type ReadItem struct {
Row, Column string
Timestamp Timestamp
Value []byte
}
// The current state of the read rows state machine.
type rrState int64
const (
newRow rrState = iota
rowInProgress
cellInProgress
)
// chunkReader handles cell chunks from the read rows response and combines
// them into full Rows.
type chunkReader struct {
state rrState
curKey []byte
curFam string
curQual []byte
curTS int64
curVal []byte
curRow Row
lastKey string
}
// newChunkReader returns a new chunkReader for handling read rows responses.
func newChunkReader() *chunkReader {
return &chunkReader{state: newRow}
}
// Process takes a cell chunk and returns a new Row if the given chunk
// completes a Row, or nil otherwise.
func (cr *chunkReader) Process(cc *btpb.ReadRowsResponse_CellChunk) (Row, error) {
var row Row
switch cr.state {
case newRow:
if err := cr.validateNewRow(cc); err != nil {
return nil, err
}
cr.curRow = make(Row)
cr.curKey = cc.RowKey
cr.curFam = cc.FamilyName.Value
cr.curQual = cc.Qualifier.Value
cr.curTS = cc.TimestampMicros
row = cr.handleCellValue(cc)
case rowInProgress:
if err := cr.validateRowInProgress(cc); err != nil {
return nil, err
}
if cc.GetResetRow() {
cr.resetToNewRow()
return nil, nil
}
if cc.FamilyName != nil {
cr.curFam = cc.FamilyName.Value
}
if cc.Qualifier != nil {
cr.curQual = cc.Qualifier.Value
}
cr.curTS = cc.TimestampMicros
row = cr.handleCellValue(cc)
case cellInProgress:
if err := cr.validateCellInProgress(cc); err != nil {
return nil, err
}
if cc.GetResetRow() {
cr.resetToNewRow()
return nil, nil
}
row = cr.handleCellValue(cc)
}
return row, nil
}
// Close must be called after all cell chunks from the response
// have been processed. An error will be returned if the reader is
// in an invalid state, in which case the error should be propagated to the caller.
func (cr *chunkReader) Close() error {
if cr.state != newRow {
return fmt.Errorf("invalid state for end of stream %q", cr.state)
}
return nil
}
// handleCellValue returns a Row if the cell value includes a commit, otherwise nil.
func (cr *chunkReader) handleCellValue(cc *btpb.ReadRowsResponse_CellChunk) Row {
if cc.ValueSize > 0 {
// ValueSize is specified so expect a split value of ValueSize bytes
if cr.curVal == nil {
cr.curVal = make([]byte, 0, cc.ValueSize)
}
cr.curVal = append(cr.curVal, cc.Value...)
cr.state = cellInProgress
} else {
// This cell is either the complete value or the last chunk of a split
if cr.curVal == nil {
cr.curVal = cc.Value
} else {
cr.curVal = append(cr.curVal, cc.Value...)
}
cr.finishCell()
if cc.GetCommitRow() {
return cr.commitRow()
} else {
cr.state = rowInProgress
}
}
return nil
}
func (cr *chunkReader) finishCell() {
ri := ReadItem{
Row: string(cr.curKey),
Column: fmt.Sprintf("%s:%s", cr.curFam, cr.curQual),
Timestamp: Timestamp(cr.curTS),
Value: cr.curVal,
}
cr.curRow[cr.curFam] = append(cr.curRow[cr.curFam], ri)
cr.curVal = nil
}
func (cr *chunkReader) commitRow() Row {
row := cr.curRow
cr.lastKey = cr.curRow.Key()
cr.resetToNewRow()
return row
}
func (cr *chunkReader) resetToNewRow() {
cr.curKey = nil
cr.curFam = ""
cr.curQual = nil
cr.curVal = nil
cr.curRow = nil
cr.curTS = 0
cr.state = newRow
}
func (cr *chunkReader) validateNewRow(cc *btpb.ReadRowsResponse_CellChunk) error {
if cc.GetResetRow() {
return fmt.Errorf("reset_row not allowed between rows")
}
if cc.RowKey == nil || cc.FamilyName == nil || cc.Qualifier == nil {
return fmt.Errorf("missing key field for new row %v", cc)
}
if cr.lastKey != "" && cr.lastKey >= string(cc.RowKey) {
return fmt.Errorf("out of order row key: %q, %q", cr.lastKey, string(cc.RowKey))
}
return nil
}
func (cr *chunkReader) validateRowInProgress(cc *btpb.ReadRowsResponse_CellChunk) error {
if err := cr.validateRowStatus(cc); err != nil {
return err
}
if cc.RowKey != nil && !bytes.Equal(cc.RowKey, cr.curKey) {
return fmt.Errorf("received new row key %q during existing row %q", cc.RowKey, cr.curKey)
}
if cc.FamilyName != nil && cc.Qualifier == nil {
return fmt.Errorf("family name %q specified without a qualifier", cc.FamilyName)
}
return nil
}
func (cr *chunkReader) validateCellInProgress(cc *btpb.ReadRowsResponse_CellChunk) error {
if err := cr.validateRowStatus(cc); err != nil {
return err
}
if cr.curVal == nil {
return fmt.Errorf("no cached cell while CELL_IN_PROGRESS %v", cc)
}
if cc.GetResetRow() == false && cr.isAnyKeyPresent(cc) {
return fmt.Errorf("cell key components found while CELL_IN_PROGRESS %v", cc)
}
return nil
}
func (cr *chunkReader) isAnyKeyPresent(cc *btpb.ReadRowsResponse_CellChunk) bool {
return cc.RowKey != nil ||
cc.FamilyName != nil ||
cc.Qualifier != nil ||
cc.TimestampMicros != 0
}
// Validate a RowStatus, commit or reset, if present.
func (cr *chunkReader) validateRowStatus(cc *btpb.ReadRowsResponse_CellChunk) error {
// Resets can't be specified with any other part of a cell
if cc.GetResetRow() && (cr.isAnyKeyPresent(cc) ||
cc.Value != nil ||
cc.ValueSize != 0 ||
cc.Labels != nil) {
return fmt.Errorf("reset must not be specified with other fields %v", cc)
}
if cc.GetCommitRow() && cc.ValueSize > 0 {
return fmt.Errorf("commit row found in between chunks in a cell")
}
return nil
}
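
The state machine above is easiest to see with a single self-contained chunk that also commits its row. A hedged same-package sketch (editorial addition; the wrappers types and the CommitRow oneof wrapper are assumed to match the genproto definitions this file compiles against):

// Editorial sketch: feeding one complete cell chunk through the chunk reader.
package bigtable

import (
	"fmt"

	"github.com/golang/protobuf/ptypes/wrappers"
	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
)

func exampleChunkReader() error {
	cr := newChunkReader()
	chunk := &btpb.ReadRowsResponse_CellChunk{
		RowKey:          []byte("row-1"),
		FamilyName:      &wrappers.StringValue{Value: "cf"},
		Qualifier:       &wrappers.BytesValue{Value: []byte("col")},
		TimestampMicros: 1000,
		Value:           []byte("hello"),
		RowStatus:       &btpb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: true},
	}
	row, err := cr.Process(chunk)
	if err != nil {
		return err
	}
	if row != nil {
		fmt.Println(row.Key(), string(row["cf"][0].Value)) // row-1 hello
	}
	// Close reports an error if the stream ended mid-row or mid-cell.
	return cr.Close()
}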


@ -0,0 +1,6 @@
#!/bin/bash
today=$(date +%Y%m%d)
sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE

vendor/cloud.google.com/go/internal/version/version.go generated vendored Normal file

@ -0,0 +1,71 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:generate ./update_version.sh
// Package version contains version information for Google Cloud Client
// Libraries for Go, as reported in request headers.
package version
import (
"runtime"
"strings"
"unicode"
)
// Repo is the current version of the client libraries in this
// repo. It should be a date in YYYYMMDD format.
const Repo = "20170404"
// Go returns the Go runtime version. The returned string
// has no whitespace.
func Go() string {
return goVersion
}
var goVersion = goVer(runtime.Version())
const develPrefix = "devel +"
func goVer(s string) string {
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}
if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return ""
}
func notSemverRune(r rune) bool {
return strings.IndexRune("0123456789.", r) < 0
}
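
For reference, a few representative inputs and the strings goVer normalizes them to (editorial sketch, same package; the inputs are illustrative runtime.Version() shapes):

// Editorial sketch: goVer's normalization of typical runtime.Version() strings.
package version

import "fmt"

func exampleGoVer() {
	fmt.Println(goVer("go1.8.1"))                   // "1.8.1"
	fmt.Println(goVer("go1.8"))                     // "1.8.0"  (patch component filled in)
	fmt.Println(goVer("go1.8rc1"))                  // "1.8.0-rc1"  (prerelease split off)
	fmt.Println(goVer("devel +abc123 linux/amd64")) // "abc123"  (word after "devel +")
}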

vendor/cloud.google.com/go/longrunning/autogen/doc.go generated vendored Normal file

@ -0,0 +1,38 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
// Package longrunning is an experimental, auto-generated package for the
// longrunning API.
//
//
// Use the client at cloud.google.com/go/longrunning in preference to this.
package longrunning // import "cloud.google.com/go/longrunning/autogen"
import (
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
)
func insertXGoog(ctx context.Context, val []string) context.Context {
md, _ := metadata.FromOutgoingContext(ctx)
md = md.Copy()
md["x-goog-api-client"] = val
return metadata.NewOutgoingContext(ctx, md)
}
func DefaultAuthScopes() []string {
return []string{}
}
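
A small same-package sketch (editorial addition; the header value is illustrative) of what insertXGoog does to the outgoing metadata: existing keys are preserved and x-goog-api-client is overwritten.

// Editorial sketch: insertXGoog keeps existing outgoing metadata and sets the
// x-goog-api-client key.
package longrunning

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func exampleInsertXGoog() {
	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("k", "v"))
	ctx = insertXGoog(ctx, []string{"gl-go/1.8.1 gapic/20170404"})
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md["x-goog-api-client"], md["k"]) // [gl-go/1.8.1 gapic/20170404] [v]
}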


@ -0,0 +1,34 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package longrunning
import (
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
"google.golang.org/grpc"
)
// InternalFromConn is for use by the Google Cloud Libraries only.
//
// InternalFromConn creates an OperationsClient from an existing gRPC connection.
func InternalFromConn(conn *grpc.ClientConn) *OperationsClient {
c := &OperationsClient{
conn: conn,
CallOptions: defaultOperationsCallOptions(),
operationsClient: longrunningpb.NewOperationsClient(conn),
}
c.SetGoogleClientInfo()
return c
}
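
A brief same-package sketch (editorial addition): a hand-written client that already holds a *grpc.ClientConn can reuse it for the Operations API instead of dialing a second connection; note that closing the returned client closes the shared connection.

// Editorial sketch: sharing an existing connection with the Operations client.
package longrunning

import "google.golang.org/grpc"

func exampleFromConn(conn *grpc.ClientConn) *OperationsClient {
	return InternalFromConn(conn) // Close() on the result closes conn as well
}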


@ -0,0 +1,266 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package longrunning
import (
"math"
"time"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/transport"
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// OperationsCallOptions contains the retry settings for each method of OperationsClient.
type OperationsCallOptions struct {
GetOperation []gax.CallOption
ListOperations []gax.CallOption
CancelOperation []gax.CallOption
DeleteOperation []gax.CallOption
}
func defaultOperationsClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("longrunning.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
}
}
func defaultOperationsCallOptions() *OperationsCallOptions {
retry := map[[2]string][]gax.CallOption{
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &OperationsCallOptions{
GetOperation: retry[[2]string{"default", "idempotent"}],
ListOperations: retry[[2]string{"default", "idempotent"}],
CancelOperation: retry[[2]string{"default", "idempotent"}],
DeleteOperation: retry[[2]string{"default", "idempotent"}],
}
}
// OperationsClient is a client for interacting with Google Long Running Operations API.
type OperationsClient struct {
// The connection to the service.
conn *grpc.ClientConn
// The gRPC API client.
operationsClient longrunningpb.OperationsClient
// The call options for this service.
CallOptions *OperationsCallOptions
// The metadata to be sent with each request.
xGoogHeader []string
}
// NewOperationsClient creates a new operations client.
//
// Manages long-running operations with an API service.
//
// When an API method normally takes a long time to complete, it can be designed
// to return [Operation][google.longrunning.Operation] to the client, and the client can use this
// interface to receive the real response asynchronously by polling the
// operation resource, or pass the operation resource to another API (such as
// Google Cloud Pub/Sub API) to receive the response. Any API service that
// returns long-running operations should implement the `Operations` interface
// so developers can have a consistent client experience.
func NewOperationsClient(ctx context.Context, opts ...option.ClientOption) (*OperationsClient, error) {
conn, err := transport.DialGRPC(ctx, append(defaultOperationsClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &OperationsClient{
conn: conn,
CallOptions: defaultOperationsCallOptions(),
operationsClient: longrunningpb.NewOperationsClient(conn),
}
c.SetGoogleClientInfo()
return c, nil
}
// Connection returns the client's connection to the API service.
func (c *OperationsClient) Connection() *grpc.ClientConn {
return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *OperationsClient) Close() error {
return c.conn.Close()
}
// SetGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *OperationsClient) SetGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
}
// GetOperation gets the latest state of a long-running operation. Clients can use this
// method to poll the operation result at intervals as recommended by the API
// service.
func (c *OperationsClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.GetOperation[0:len(c.CallOptions.GetOperation):len(c.CallOptions.GetOperation)], opts...)
var resp *longrunningpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// ListOperations lists operations that match the specified filter in the request. If the
// server doesn't support this method, it returns `UNIMPLEMENTED`.
//
// NOTE: the `name` binding below allows API services to override the binding
// to use different resource name schemes, such as `users/*/operations`.
func (c *OperationsClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ListOperations[0:len(c.CallOptions.ListOperations):len(c.CallOptions.ListOperations)], opts...)
it := &OperationIterator{}
it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
var resp *longrunningpb.ListOperationsResponse
req.PageToken = pageToken
if pageSize > math.MaxInt32 {
req.PageSize = math.MaxInt32
} else {
req.PageSize = int32(pageSize)
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.operationsClient.ListOperations(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, "", err
}
return resp.Operations, resp.NextPageToken, nil
}
fetch := func(pageSize int, pageToken string) (string, error) {
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
if err != nil {
return "", err
}
it.items = append(it.items, items...)
return nextPageToken, nil
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
return it
}
// CancelOperation starts asynchronous cancellation on a long-running operation. The server
// makes a best effort to cancel the operation, but success is not
// guaranteed. If the server doesn't support this method, it returns
// `google.rpc.Code.UNIMPLEMENTED`. Clients can use
// [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
// other methods to check whether the cancellation succeeded or whether the
// operation completed despite cancellation. On successful cancellation,
// the operation is not deleted; instead, it becomes an operation with
// an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
// corresponding to `Code.CANCELLED`.
func (c *OperationsClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.CancelOperation[0:len(c.CallOptions.CancelOperation):len(c.CallOptions.CancelOperation)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.operationsClient.CancelOperation(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
// DeleteOperation deletes a long-running operation. This method indicates that the client is
// no longer interested in the operation result. It does not cancel the
// operation. If the server doesn't support this method, it returns
// `google.rpc.Code.UNIMPLEMENTED`.
func (c *OperationsClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.DeleteOperation[0:len(c.CallOptions.DeleteOperation):len(c.CallOptions.DeleteOperation)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.operationsClient.DeleteOperation(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}
// OperationIterator manages a stream of *longrunningpb.Operation.
type OperationIterator struct {
items []*longrunningpb.Operation
pageInfo *iterator.PageInfo
nextFunc func() error
// InternalFetch is for use by the Google Cloud Libraries only.
// It is not part of the stable interface of this package.
//
// InternalFetch returns results from a single call to the underlying RPC.
// The number of results is no greater than pageSize.
// If there are no more results, nextPageToken is empty and err is nil.
InternalFetch func(pageSize int, pageToken string) (results []*longrunningpb.Operation, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *OperationIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *OperationIterator) Next() (*longrunningpb.Operation, error) {
var item *longrunningpb.Operation
if err := it.nextFunc(); err != nil {
return item, err
}
item = it.items[0]
it.items = it.items[1:]
return item, nil
}
func (it *OperationIterator) bufLen() int {
return len(it.items)
}
func (it *OperationIterator) takeBuf() interface{} {
b := it.items
it.items = nil
return b
}
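
A hedged sketch (editorial addition, same package; the resource name is illustrative) of driving the iterator returned by ListOperations until it reports iterator.Done:

// Editorial sketch: paging through operations with the iterator.
package longrunning

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
)

func exampleListOperations(ctx context.Context, c *OperationsClient) error {
	it := c.ListOperations(ctx, &longrunningpb.ListOperationsRequest{
		Name: "operations", // illustrative; services may override the name binding
	})
	for {
		op, err := it.Next()
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(op.Name, op.Done)
	}
}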

vendor/cloud.google.com/go/longrunning/longrunning.go generated vendored Normal file

@ -0,0 +1,168 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package longrunning supports Long Running Operations for the Google Cloud Libraries.
// See google.golang.org/genproto/googleapis/longrunning for its service definition.
//
// Users of the Google Cloud Libraries will typically not use this package directly.
// Instead they will call functions returning Operations and call their methods.
//
// This package is still experimental and subject to change.
package longrunning // import "cloud.google.com/go/longrunning"
import (
"errors"
"fmt"
"time"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
autogen "cloud.google.com/go/longrunning/autogen"
pb "google.golang.org/genproto/googleapis/longrunning"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// ErrNoMetadata is the error returned by Metadata if the operation contains no metadata.
var ErrNoMetadata = errors.New("operation contains no metadata")
// Operation represents the result of an API call that may not be ready yet.
type Operation struct {
c operationsClient
proto *pb.Operation
}
type operationsClient interface {
GetOperation(context.Context, *pb.GetOperationRequest, ...gax.CallOption) (*pb.Operation, error)
CancelOperation(context.Context, *pb.CancelOperationRequest, ...gax.CallOption) error
DeleteOperation(context.Context, *pb.DeleteOperationRequest, ...gax.CallOption) error
}
// InternalNewOperation is for use by the Google Cloud Libraries only.
//
// InternalNewOperation returns a long-running operation, abstracting the raw pb.Operation.
// The conn parameter refers to a server that proto was received from.
func InternalNewOperation(inner *autogen.OperationsClient, proto *pb.Operation) *Operation {
return &Operation{
c: inner,
proto: proto,
}
}
// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service
// from which the operation is created.
func (op *Operation) Name() string {
return op.proto.Name
}
// Done reports whether the long-running operation has completed.
func (op *Operation) Done() bool {
return op.proto.Done
}
// Metadata unmarshals op's metadata into meta.
// If op does not contain any metadata, Metadata returns ErrNoMetadata and meta is unmodified.
func (op *Operation) Metadata(meta proto.Message) error {
if m := op.proto.Metadata; m != nil {
return ptypes.UnmarshalAny(m, meta)
}
return ErrNoMetadata
}
// Poll fetches the latest state of a long-running operation.
//
// If Poll fails, the error is returned and op is unmodified.
// If Poll succeeds and the operation has completed with failure,
// the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true; if resp != nil, the response of the operation
// is stored in resp.
func (op *Operation) Poll(ctx context.Context, resp proto.Message, opts ...gax.CallOption) error {
if !op.Done() {
p, err := op.c.GetOperation(ctx, &pb.GetOperationRequest{Name: op.Name()}, opts...)
if err != nil {
return err
}
op.proto = p
}
if !op.Done() {
return nil
}
switch r := op.proto.Result.(type) {
case *pb.Operation_Error:
// TODO (pongad): r.Details may contain further information
return grpc.Errorf(codes.Code(r.Error.Code), "%s", r.Error.Message)
case *pb.Operation_Response:
if resp == nil {
return nil
}
return ptypes.UnmarshalAny(r.Response, resp)
default:
return fmt.Errorf("unsupported result type %[1]T: %[1]v", r)
}
}
// Wait blocks until the operation is completed.
// If resp != nil, Wait stores the response in resp.
//
// See documentation of Poll for error-handling information.
func (op *Operation) Wait(ctx context.Context, resp proto.Message, opts ...gax.CallOption) error {
bo := gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 10 * time.Second,
}
return op.wait(ctx, resp, &bo, gax.Sleep, opts...)
}
type sleeper func(context.Context, time.Duration) error
// wait implements Wait, taking exponentialBackoff and sleeper arguments for testing.
func (op *Operation) wait(ctx context.Context, resp proto.Message, bo *gax.Backoff, sl sleeper, opts ...gax.CallOption) error {
for {
if err := op.Poll(ctx, resp, opts...); err != nil {
return err
}
if op.Done() {
return nil
}
if err := sl(ctx, bo.Pause()); err != nil {
return err
}
}
}
// Cancel starts asynchronous cancellation on a long-running operation. The server
// makes a best effort to cancel the operation, but success is not
// guaranteed. If the server doesn't support this method, it returns
// grpc.Code(error) == codes.Unimplemented. Clients can use
// Poll or other methods to check whether the cancellation succeeded or whether the
// operation completed despite cancellation. On successful cancellation,
// the operation is not deleted; instead, op.Poll returns an error
// with code Canceled.
func (op *Operation) Cancel(ctx context.Context, opts ...gax.CallOption) error {
return op.c.CancelOperation(ctx, &pb.CancelOperationRequest{Name: op.Name()}, opts...)
}
// Delete deletes a long-running operation. This method indicates that the client is
// no longer interested in the operation result. It does not cancel the
// operation. If the server doesn't support this method, grpc.Code(error) == codes.Unimplemented.
func (op *Operation) Delete(ctx context.Context, opts ...gax.CallOption) error {
return op.c.DeleteOperation(ctx, &pb.DeleteOperationRequest{Name: op.Name()}, opts...)
}
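
A hedged same-package sketch (editorial addition) of the usual flow: block on Wait with a deadline and decode the response into a concrete message. The response type below is a placeholder; a real caller uses whatever message its API documents for the operation.

// Editorial sketch: waiting for an operation and decoding its response.
package longrunning

import (
	"time"

	"github.com/golang/protobuf/ptypes/duration"
	"golang.org/x/net/context"
)

func exampleWait(op *Operation) (*duration.Duration, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	var resp duration.Duration // placeholder response message, for illustration only
	if err := op.Wait(ctx, &resp); err != nil {
		return nil, err // includes the operation's own error, surfaced by Poll
	}
	return &resp, nil
}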


@ -0,0 +1,36 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2010 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/
# at src/google/protobuf/descriptor.proto
regenerate:
@echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION
protoc --go_out=../../../../.. -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto

File diff suppressed because it is too large

vendor/github.com/golang/protobuf/ptypes/any.go generated vendored Normal file

@ -0,0 +1,136 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
// This file implements functions to marshal proto.Message to/from
// google.protobuf.Any message.
import (
"fmt"
"reflect"
"strings"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any"
)
const googleApis = "type.googleapis.com/"
// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
//
// Note that regular type assertions should be done using the Is
// function. AnyMessageName is provided for less common use cases like filtering a
// sequence of Any messages based on a set of allowed message type names.
func AnyMessageName(any *any.Any) (string, error) {
slash := strings.LastIndex(any.TypeUrl, "/")
if slash < 0 {
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
}
return any.TypeUrl[slash+1:], nil
}
// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
func MarshalAny(pb proto.Message) (*any.Any, error) {
value, err := proto.Marshal(pb)
if err != nil {
return nil, err
}
return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
}
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in a google.protobuf.Any
// message. The allocated message is stored in the embedded proto.Message.
//
// Example:
//
// var x ptypes.DynamicAny
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
// fmt.Printf("unmarshaled message: %v", x.Message)
type DynamicAny struct {
proto.Message
}
// Empty returns a new proto.Message of the type specified in a
// google.protobuf.Any message. It returns an error if the corresponding
// message type isn't linked in.
func Empty(any *any.Any) (proto.Message, error) {
aname, err := AnyMessageName(any)
if err != nil {
return nil, err
}
t := proto.MessageType(aname)
if t == nil {
return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
}
return reflect.New(t.Elem()).Interface().(proto.Message), nil
}
// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
// message and places the decoded result in pb. It returns an error if the type
// of the contents of the Any message does not match the type of the pb message.
//
// pb can be a proto.Message, or a *DynamicAny.
func UnmarshalAny(any *any.Any, pb proto.Message) error {
if d, ok := pb.(*DynamicAny); ok {
if d.Message == nil {
var err error
d.Message, err = Empty(any)
if err != nil {
return err
}
}
return UnmarshalAny(any, d.Message)
}
aname, err := AnyMessageName(any)
if err != nil {
return err
}
mname := proto.MessageName(pb)
if aname != mname {
return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
}
return proto.Unmarshal(any.Value, pb)
}
// Is returns true if any value contains a given message type.
func Is(any *any.Any, pb proto.Message) bool {
aname, err := AnyMessageName(any)
if err != nil {
return false
}
return aname == proto.MessageName(pb)
}
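
A same-package sketch (editorial addition) tying these helpers together: pack a message, check its type, and unpack it both into a known type and into a DynamicAny.

// Editorial sketch: MarshalAny / Is / UnmarshalAny / DynamicAny round trip.
package ptypes

import (
	"fmt"

	"github.com/golang/protobuf/ptypes/duration"
)

func exampleAny() error {
	orig := &duration.Duration{Seconds: 3, Nanos: 1}
	a, err := MarshalAny(orig)
	if err != nil {
		return err
	}
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration
	if !Is(a, &duration.Duration{}) {
		return fmt.Errorf("unexpected type in Any: %s", a.TypeUrl)
	}
	var back duration.Duration
	if err := UnmarshalAny(a, &back); err != nil {
		return err
	}
	var dyn DynamicAny // allocates the right concrete type during unmarshaling
	if err := UnmarshalAny(a, &dyn); err != nil {
		return err
	}
	fmt.Println(back.Seconds, dyn.Message) // 3, plus the re-allocated *duration.Duration
	return nil
}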

vendor/github.com/golang/protobuf/ptypes/any/any.pb.go generated vendored Normal file

@ -0,0 +1,168 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/any/any.proto
/*
Package any is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/any/any.proto
It has these top-level messages:
Any
*/
package any
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
type Any struct {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *Any) Reset() { *m = Any{} }
func (m *Any) String() string { return proto.CompactTextString(m) }
func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Any) XXX_WellKnownType() string { return "Any" }
func (m *Any) GetTypeUrl() string {
if m != nil {
return m.TypeUrl
}
return ""
}
func (m *Any) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 184 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc,
0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c,
0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69,
0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24,
0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1,
0x38, 0xe5, 0x73, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19,
0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x45, 0x4c, 0xcc, 0xee, 0x01, 0x4e, 0xab,
0x98, 0xe4, 0xdc, 0x21, 0x46, 0x05, 0x40, 0x95, 0xe8, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5,
0x97, 0xe7, 0x85, 0x80, 0x94, 0x26, 0xb1, 0x81, 0xf5, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff,
0x45, 0x1f, 0x1a, 0xf2, 0xf3, 0x00, 0x00, 0x00,
}

vendor/github.com/golang/protobuf/ptypes/any/any.proto generated vendored Normal file

@ -0,0 +1,139 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option go_package = "github.com/golang/protobuf/ptypes/any";
option java_package = "com.google.protobuf";
option java_outer_classname = "AnyProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
message Any {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
string type_url = 1;
// Must be a valid serialized protocol buffer of the above specified type.
bytes value = 2;
}

vendor/github.com/golang/protobuf/ptypes/doc.go generated vendored Normal file

@ -0,0 +1,35 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package ptypes contains code for interacting with well-known types.
*/
package ptypes

vendor/github.com/golang/protobuf/ptypes/duration.go generated vendored Normal file

@ -0,0 +1,102 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
// This file implements conversions between google.protobuf.Duration
// and time.Duration.
import (
"errors"
"fmt"
"time"
durpb "github.com/golang/protobuf/ptypes/duration"
)
const (
// Range of a durpb.Duration in seconds, as specified in
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
minSeconds = -maxSeconds
)
// validateDuration determines whether the durpb.Duration is valid according to the
// definition in google/protobuf/duration.proto. A valid durpb.Duration
// may still be too large to fit into a time.Duration (the range of durpb.Duration
// is about 10,000 years, and the range of time.Duration is about 290 years).
func validateDuration(d *durpb.Duration) error {
if d == nil {
return errors.New("duration: nil Duration")
}
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
return fmt.Errorf("duration: %v: seconds out of range", d)
}
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
return fmt.Errorf("duration: %v: nanos out of range", d)
}
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
}
return nil
}
// Duration converts a durpb.Duration to a time.Duration. Duration
// returns an error if the durpb.Duration is invalid or is too large to be
// represented in a time.Duration.
func Duration(p *durpb.Duration) (time.Duration, error) {
if err := validateDuration(p); err != nil {
return 0, err
}
d := time.Duration(p.Seconds) * time.Second
if int64(d/time.Second) != p.Seconds {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
if p.Nanos != 0 {
d += time.Duration(p.Nanos)
if (d < 0) != (p.Nanos < 0) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
}
return d, nil
}
// DurationProto converts a time.Duration to a durpb.Duration.
func DurationProto(d time.Duration) *durpb.Duration {
nanos := d.Nanoseconds()
secs := nanos / 1e9
nanos -= secs * 1e9
return &durpb.Duration{
Seconds: secs,
Nanos: int32(nanos),
}
}
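
A same-package sketch (editorial addition) of the round trip in both directions, plus the validation path for out-of-range values:

// Editorial sketch: converting in both directions and hitting the range check.
package ptypes

import (
	"fmt"
	"time"

	durpb "github.com/golang/protobuf/ptypes/duration"
)

func exampleDuration() {
	p := DurationProto(90*time.Second + 500*time.Millisecond)
	fmt.Println(p.Seconds, p.Nanos) // 90 500000000

	d, err := Duration(p)
	fmt.Println(d, err) // 1m30.5s <nil>

	// Values outside the proto range are rejected rather than silently truncated.
	_, err = Duration(&durpb.Duration{Seconds: maxSeconds + 1})
	fmt.Println(err != nil) // true
}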


@ -0,0 +1,146 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/duration/duration.proto
/*
Package duration is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/duration/duration.proto
It has these top-level messages:
Duration
*/
package duration
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
// Timestamp end = ...;
// Duration duration = ...;
//
// duration.seconds = end.seconds - start.seconds;
// duration.nanos = end.nanos - start.nanos;
//
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (duration.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
// Timestamp start = ...;
// Duration duration = ...;
// Timestamp end = ...;
//
// end.seconds = start.seconds + duration.seconds;
// end.nanos = start.nanos + duration.nanos;
//
// if (end.nanos < 0) {
// end.seconds -= 1;
// end.nanos += 1000000000;
// } else if (end.nanos >= 1000000000) {
// end.seconds += 1;
// end.nanos -= 1000000000;
// }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
// td = datetime.timedelta(days=3, minutes=10)
// duration = Duration()
// duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
type Duration struct {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}
func (m *Duration) Reset() { *m = Duration{} }
func (m *Duration) String() string { return proto.CompactTextString(m) }
func (*Duration) ProtoMessage() {}
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Duration) XXX_WellKnownType() string { return "Duration" }
func (m *Duration) GetSeconds() int64 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *Duration) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
func init() {
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
}
func init() {
proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0)
}
var fileDescriptor0 = []byte{
// 189 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29,
0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3,
0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8,
0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60,
0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6,
0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98,
0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xfb, 0x83, 0x91, 0x71, 0x11, 0x13,
0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9,
0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01,
0x01, 0x00, 0x00, 0xff, 0xff, 0x45, 0x5a, 0x81, 0x3d, 0x0e, 0x01, 0x00, 0x00,
}

View File

@ -0,0 +1,117 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/duration";
option java_package = "com.google.protobuf";
option java_outer_classname = "DurationProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
// Timestamp end = ...;
// Duration duration = ...;
//
// duration.seconds = end.seconds - start.seconds;
// duration.nanos = end.nanos - start.nanos;
//
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (duration.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
// Timestamp start = ...;
// Duration duration = ...;
// Timestamp end = ...;
//
// end.seconds = start.seconds + duration.seconds;
// end.nanos = start.nanos + duration.nanos;
//
// if (end.nanos < 0) {
// end.seconds -= 1;
// end.nanos += 1000000000;
// } else if (end.nanos >= 1000000000) {
// end.seconds += 1;
// end.nanos -= 1000000000;
// }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
// td = datetime.timedelta(days=3, minutes=10)
// duration = Duration()
// duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
message Duration {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
int64 seconds = 1;
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
int32 nanos = 2;
}
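As a rough check of the JSON mapping described above, the jsonpb marshaler that ships with the same protobuf release (assumed to be available alongside these vendored packages) renders a Duration as the string form:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/jsonpb"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	m := &jsonpb.Marshaler{}
	// 3 seconds and 1 nanosecond, as in the comment above.
	s, err := m.MarshalToString(&durpb.Duration{Seconds: 3, Nanos: 1})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s) // expected: "3.000000001s"
}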

View File

@ -0,0 +1,68 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/empty/empty.proto
/*
Package empty is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/empty/empty.proto
It has these top-level messages:
Empty
*/
package empty
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// A generic empty message that you can re-use to avoid defining duplicated
// empty messages in your APIs. A typical example is to use it as the request
// or the response type of an API method. For instance:
//
// service Foo {
// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
// }
//
// The JSON representation for `Empty` is empty JSON object `{}`.
type Empty struct {
}
func (m *Empty) Reset() { *m = Empty{} }
func (m *Empty) String() string { return proto.CompactTextString(m) }
func (*Empty) ProtoMessage() {}
func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Empty) XXX_WellKnownType() string { return "Empty" }
func init() {
proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
}
func init() {
proto.RegisterFile("github.com/golang/protobuf/ptypes/empty/empty.proto", fileDescriptor0)
}
var fileDescriptor0 = []byte{
// 147 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4e, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcd,
0x2d, 0x28, 0xa9, 0x84, 0x90, 0x7a, 0x60, 0x39, 0x21, 0xfe, 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54,
0x3d, 0x98, 0x4a, 0x25, 0x76, 0x2e, 0x56, 0x57, 0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e,
0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x27, 0xd2,
0xce, 0x1f, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c,
0x0c, 0x80, 0xaa, 0xd3, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9,
0x4f, 0x62, 0x03, 0x1b, 0x60, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x6e, 0x8e, 0x0a, 0x06, 0xcf,
0x00, 0x00, 0x00,
}

View File

@ -0,0 +1,52 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option go_package = "github.com/golang/protobuf/ptypes/empty";
option java_package = "com.google.protobuf";
option java_outer_classname = "EmptyProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
option cc_enable_arenas = true;
// A generic empty message that you can re-use to avoid defining duplicated
// empty messages in your APIs. A typical example is to use it as the request
// or the response type of an API method. For instance:
//
// service Foo {
// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
// }
//
// The JSON representation for `Empty` is empty JSON object `{}`.
message Empty {}
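A hedged sketch of how Empty is typically used on the Go side; BarClient and callBar are invented for illustration and are not part of this commit.

package example

import (
	"github.com/golang/protobuf/ptypes/empty"
	"golang.org/x/net/context"
)

// BarClient is a hypothetical client whose RPC takes and returns Empty.
type BarClient interface {
	Bar(ctx context.Context, in *empty.Empty) (*empty.Empty, error)
}

// callBar shows that there is nothing to populate: the empty message only
// satisfies the request/response shape of the RPC.
func callBar(ctx context.Context, c BarClient) error {
	_, err := c.Bar(ctx, &empty.Empty{})
	return err
}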

66
vendor/github.com/golang/protobuf/ptypes/regen.sh generated vendored Executable file
View File

@ -0,0 +1,66 @@
#!/bin/bash -e
#
# This script fetches and rebuilds the "well-known types" protocol buffers.
# To run this you will need protoc and goprotobuf installed;
# see https://github.com/golang/protobuf for instructions.
# You also need Go and Git installed.
PKG=github.com/golang/protobuf/ptypes
UPSTREAM=https://github.com/google/protobuf
UPSTREAM_SUBDIR=src/google/protobuf
PROTO_FILES='
any.proto
duration.proto
empty.proto
struct.proto
timestamp.proto
wrappers.proto
'
function die() {
echo 1>&2 $*
exit 1
}
# Sanity check that the right tools are accessible.
for tool in go git protoc protoc-gen-go; do
q=$(which $tool) || die "didn't find $tool"
echo 1>&2 "$tool: $q"
done
tmpdir=$(mktemp -d -t regen-wkt.XXXXXX)
trap 'rm -rf $tmpdir' EXIT
echo -n 1>&2 "finding package dir... "
pkgdir=$(go list -f '{{.Dir}}' $PKG)
echo 1>&2 $pkgdir
base=$(echo $pkgdir | sed "s,/$PKG\$,,")
echo 1>&2 "base: $base"
cd $base
echo 1>&2 "fetching latest protos... "
git clone -q $UPSTREAM $tmpdir
# Pass 1: build mapping from upstream filename to our filename.
declare -A filename_map
for f in $(cd $PKG && find * -name '*.proto'); do
echo -n 1>&2 "looking for latest version of $f... "
up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f) | grep -v /testdata/)
echo 1>&2 $up
if [ $(echo $up | wc -w) != "1" ]; then
die "not exactly one match"
fi
filename_map[$up]=$f
done
# Pass 2: copy files
for up in "${!filename_map[@]}"; do
f=${filename_map[$up]}
shortname=$(basename $f | sed 's,\.proto$,,')
cp $tmpdir/$UPSTREAM_SUBDIR/$up $PKG/$f
done
# Run protoc once per package.
for dir in $(find $PKG -name '*.proto' | xargs dirname | sort | uniq); do
echo 1>&2 "* $dir"
protoc --go_out=. $dir/*.proto
done
echo 1>&2 "All OK"

125
vendor/github.com/golang/protobuf/ptypes/timestamp.go generated vendored Normal file
View File

@ -0,0 +1,125 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
// This file implements operations on google.protobuf.Timestamp.
import (
"errors"
"fmt"
"time"
tspb "github.com/golang/protobuf/ptypes/timestamp"
)
const (
// Seconds field of the earliest valid Timestamp.
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
minValidSeconds = -62135596800
// Seconds field just after the latest valid Timestamp.
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
maxValidSeconds = 253402300800
)
// validateTimestamp determines whether a Timestamp is valid.
// A valid timestamp represents a time in the range
// [0001-01-01, 10000-01-01) and has a Nanos field
// in the range [0, 1e9).
//
// If the Timestamp is valid, validateTimestamp returns nil.
// Otherwise, it returns an error that describes
// the problem.
//
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
func validateTimestamp(ts *tspb.Timestamp) error {
if ts == nil {
return errors.New("timestamp: nil Timestamp")
}
if ts.Seconds < minValidSeconds {
return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
}
if ts.Seconds >= maxValidSeconds {
return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
}
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
}
return nil
}
// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return value
// is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
// Don't return the zero value on error, because it corresponds to a valid
// timestamp. Instead return whatever time.Unix gives us.
var t time.Time
if ts == nil {
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
} else {
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
}
return t, validateTimestamp(ts)
}
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
seconds := t.Unix()
nanos := int32(t.Sub(time.Unix(seconds, 0)))
ts := &tspb.Timestamp{
Seconds: seconds,
Nanos: nanos,
}
if err := validateTimestamp(ts); err != nil {
return nil, err
}
return ts, nil
}
// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
// Timestamps, it returns an error message in parentheses.
func TimestampString(ts *tspb.Timestamp) string {
t, err := Timestamp(ts)
if err != nil {
return fmt.Sprintf("(%v)", err)
}
return t.Format(time.RFC3339Nano)
}
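A short usage sketch of the three helpers above (not part of the vendored file):

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> Timestamp proto, validated against the [0001, 10000) range.
	ts, err := ptypes.TimestampProto(time.Now())
	if err != nil {
		log.Fatal(err)
	}

	// Timestamp proto -> time.Time in UTC.
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(t, ptypes.TimestampString(ts))
}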

View File

@ -0,0 +1,162 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
/*
Package timestamp is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
It has these top-level messages:
Timestamp
*/
package timestamp
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// A Timestamp represents a point in time independent of any time zone
// or calendar, represented as seconds and fractions of seconds at
// nanosecond resolution in UTC Epoch time. It is encoded using the
// Proleptic Gregorian Calendar which extends the Gregorian calendar
// backwards to year one. It is encoded assuming all minutes are 60
// seconds long, i.e. leap seconds are "smeared" so that no leap second
// table is needed for interpretation. Range is from
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
// By restricting to that range, we ensure that we can convert to
// and from RFC 3339 date strings.
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
//
// # Examples
//
// Example 1: Compute Timestamp from POSIX `time()`.
//
// Timestamp timestamp;
// timestamp.set_seconds(time(NULL));
// timestamp.set_nanos(0);
//
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
//
// struct timeval tv;
// gettimeofday(&tv, NULL);
//
// Timestamp timestamp;
// timestamp.set_seconds(tv.tv_sec);
// timestamp.set_nanos(tv.tv_usec * 1000);
//
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
//
// FILETIME ft;
// GetSystemTimeAsFileTime(&ft);
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
//
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
// Timestamp timestamp;
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
//
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
//
// long millis = System.currentTimeMillis();
//
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
// .setNanos((int) ((millis % 1000) * 1000000)).build();
//
//
// Example 5: Compute Timestamp from current time in Python.
//
// timestamp = Timestamp()
// timestamp.GetCurrentTime()
//
// # JSON Mapping
//
// In JSON format, the Timestamp type is encoded as a string in the
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
// where {year} is always expressed using four digits while {month}, {day},
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required, though only UTC (as indicated by "Z") is presently supported.
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
// to obtain a formatter capable of generating timestamps in this format.
//
//
type Timestamp struct {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}
func (m *Timestamp) Reset() { *m = Timestamp{} }
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
func (m *Timestamp) GetSeconds() int64 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *Timestamp) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
func init() {
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
}
func init() {
proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0)
}
var fileDescriptor0 = []byte{
// 190 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9,
0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3,
0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24,
0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83,
0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d,
0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x8e, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x4c, 0x27, 0x3e,
0xb8, 0x89, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0xdc, 0xfc, 0x83, 0x91, 0x71, 0x11,
0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, 0xe1,
0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, 0x8c,
0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x59, 0x0a, 0x4d, 0x13, 0x01, 0x00, 0x00,
}

View File

@ -0,0 +1,133 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/timestamp";
option java_package = "com.google.protobuf";
option java_outer_classname = "TimestampProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// A Timestamp represents a point in time independent of any time zone
// or calendar, represented as seconds and fractions of seconds at
// nanosecond resolution in UTC Epoch time. It is encoded using the
// Proleptic Gregorian Calendar which extends the Gregorian calendar
// backwards to year one. It is encoded assuming all minutes are 60
// seconds long, i.e. leap seconds are "smeared" so that no leap second
// table is needed for interpretation. Range is from
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
// By restricting to that range, we ensure that we can convert to
// and from RFC 3339 date strings.
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
//
// # Examples
//
// Example 1: Compute Timestamp from POSIX `time()`.
//
// Timestamp timestamp;
// timestamp.set_seconds(time(NULL));
// timestamp.set_nanos(0);
//
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
//
// struct timeval tv;
// gettimeofday(&tv, NULL);
//
// Timestamp timestamp;
// timestamp.set_seconds(tv.tv_sec);
// timestamp.set_nanos(tv.tv_usec * 1000);
//
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
//
// FILETIME ft;
// GetSystemTimeAsFileTime(&ft);
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
//
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
// Timestamp timestamp;
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
//
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
//
// long millis = System.currentTimeMillis();
//
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
// .setNanos((int) ((millis % 1000) * 1000000)).build();
//
//
// Example 5: Compute Timestamp from current time in Python.
//
// timestamp = Timestamp()
// timestamp.GetCurrentTime()
//
// # JSON Mapping
//
// In JSON format, the Timestamp type is encoded as a string in the
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
// where {year} is always expressed using four digits while {month}, {day},
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required, though only UTC (as indicated by "Z") is presently supported.
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
// to obtain a formatter capable of generating timestamps in this format.
//
//
message Timestamp {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
int64 seconds = 1;
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
int32 nanos = 2;
}
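The JSON mapping above is plain RFC 3339 in UTC; as a sanity check, Go's time.RFC3339Nano layout reproduces the string used in the comment's example:

package main

import (
	"fmt"
	"time"
)

func main() {
	// 15.01 seconds past 01:30 UTC on January 15, 2017 (10000000 ns = 0.01 s).
	t := time.Date(2017, time.January, 15, 1, 30, 15, 10000000, time.UTC)
	fmt.Println(t.Format(time.RFC3339Nano)) // 2017-01-15T01:30:15.01Z
}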

View File

@ -0,0 +1,262 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
/*
Package wrappers is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
It has these top-level messages:
DoubleValue
FloatValue
Int64Value
UInt64Value
Int32Value
UInt32Value
BoolValue
StringValue
BytesValue
*/
package wrappers
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Wrapper message for `double`.
//
// The JSON representation for `DoubleValue` is JSON number.
type DoubleValue struct {
// The double value.
Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
}
func (m *DoubleValue) Reset() { *m = DoubleValue{} }
func (m *DoubleValue) String() string { return proto.CompactTextString(m) }
func (*DoubleValue) ProtoMessage() {}
func (*DoubleValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" }
func (m *DoubleValue) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
// Wrapper message for `float`.
//
// The JSON representation for `FloatValue` is JSON number.
type FloatValue struct {
// The float value.
Value float32 `protobuf:"fixed32,1,opt,name=value" json:"value,omitempty"`
}
func (m *FloatValue) Reset() { *m = FloatValue{} }
func (m *FloatValue) String() string { return proto.CompactTextString(m) }
func (*FloatValue) ProtoMessage() {}
func (*FloatValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" }
func (m *FloatValue) GetValue() float32 {
if m != nil {
return m.Value
}
return 0
}
// Wrapper message for `int64`.
//
// The JSON representation for `Int64Value` is JSON string.
type Int64Value struct {
// The int64 value.
Value int64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
}
func (m *Int64Value) Reset() { *m = Int64Value{} }
func (m *Int64Value) String() string { return proto.CompactTextString(m) }
func (*Int64Value) ProtoMessage() {}
func (*Int64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" }
func (m *Int64Value) GetValue() int64 {
if m != nil {
return m.Value
}
return 0
}
// Wrapper message for `uint64`.
//
// The JSON representation for `UInt64Value` is JSON string.
type UInt64Value struct {
// The uint64 value.
Value uint64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
}
func (m *UInt64Value) Reset() { *m = UInt64Value{} }
func (m *UInt64Value) String() string { return proto.CompactTextString(m) }
func (*UInt64Value) ProtoMessage() {}
func (*UInt64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" }
func (m *UInt64Value) GetValue() uint64 {
if m != nil {
return m.Value
}
return 0
}
// Wrapper message for `int32`.
//
// The JSON representation for `Int32Value` is JSON number.
type Int32Value struct {
// The int32 value.
Value int32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
}
func (m *Int32Value) Reset() { *m = Int32Value{} }
func (m *Int32Value) String() string { return proto.CompactTextString(m) }
func (*Int32Value) ProtoMessage() {}
func (*Int32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" }
func (m *Int32Value) GetValue() int32 {
if m != nil {
return m.Value
}
return 0
}
// Wrapper message for `uint32`.
//
// The JSON representation for `UInt32Value` is JSON number.
type UInt32Value struct {
// The uint32 value.
Value uint32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
}
func (m *UInt32Value) Reset() { *m = UInt32Value{} }
func (m *UInt32Value) String() string { return proto.CompactTextString(m) }
func (*UInt32Value) ProtoMessage() {}
func (*UInt32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" }
func (m *UInt32Value) GetValue() uint32 {
if m != nil {
return m.Value
}
return 0
}
// Wrapper message for `bool`.
//
// The JSON representation for `BoolValue` is JSON `true` and `false`.
type BoolValue struct {
// The bool value.
Value bool `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
}
func (m *BoolValue) Reset() { *m = BoolValue{} }
func (m *BoolValue) String() string { return proto.CompactTextString(m) }
func (*BoolValue) ProtoMessage() {}
func (*BoolValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" }
func (m *BoolValue) GetValue() bool {
if m != nil {
return m.Value
}
return false
}
// Wrapper message for `string`.
//
// The JSON representation for `StringValue` is JSON string.
type StringValue struct {
// The string value.
Value string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"`
}
func (m *StringValue) Reset() { *m = StringValue{} }
func (m *StringValue) String() string { return proto.CompactTextString(m) }
func (*StringValue) ProtoMessage() {}
func (*StringValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (*StringValue) XXX_WellKnownType() string { return "StringValue" }
func (m *StringValue) GetValue() string {
if m != nil {
return m.Value
}
return ""
}
// Wrapper message for `bytes`.
//
// The JSON representation for `BytesValue` is JSON string.
type BytesValue struct {
// The bytes value.
Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *BytesValue) Reset() { *m = BytesValue{} }
func (m *BytesValue) String() string { return proto.CompactTextString(m) }
func (*BytesValue) ProtoMessage() {}
func (*BytesValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" }
func (m *BytesValue) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
func init() {
proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue")
proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue")
proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value")
proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value")
proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value")
proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value")
proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue")
proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue")
proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue")
}
func init() {
proto.RegisterFile("github.com/golang/protobuf/ptypes/wrappers/wrappers.proto", fileDescriptor0)
}
var fileDescriptor0 = []byte{
// 257 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0x2f,
0x4a, 0x2c, 0x28, 0x48, 0x2d, 0x42, 0x30, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, 0xd3,
0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0x94, 0xb9, 0xb8, 0x5d, 0xf2, 0x4b, 0x93, 0x72, 0x52, 0xc3,
0x12, 0x73, 0x4a, 0x53, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x40, 0x0c, 0x09, 0x46, 0x05, 0x46, 0x0d,
0xc6, 0x20, 0x08, 0x47, 0x49, 0x89, 0x8b, 0xcb, 0x2d, 0x27, 0x3f, 0xb1, 0x04, 0x8b, 0x1a, 0x26,
0x24, 0x35, 0x9e, 0x79, 0x25, 0x66, 0x26, 0x58, 0xd4, 0x30, 0xc3, 0xd4, 0x28, 0x73, 0x71, 0x87,
0xe2, 0x52, 0xc4, 0x82, 0x6a, 0x90, 0xb1, 0x11, 0x16, 0x35, 0xac, 0x68, 0x06, 0x61, 0x55, 0xc4,
0x0b, 0x53, 0xa4, 0xc8, 0xc5, 0xe9, 0x94, 0x9f, 0x9f, 0x83, 0x45, 0x09, 0x07, 0x92, 0x39, 0xc1,
0x25, 0x45, 0x99, 0x79, 0xe9, 0x58, 0x14, 0x71, 0x22, 0x39, 0xc8, 0xa9, 0xb2, 0x24, 0xb5, 0x18,
0x8b, 0x1a, 0x1e, 0xa8, 0x1a, 0xa7, 0x1a, 0x2e, 0xe1, 0xe4, 0xfc, 0x5c, 0x3d, 0xb4, 0xd0, 0x75,
0xe2, 0x0d, 0x87, 0x06, 0x7f, 0x00, 0x48, 0x24, 0x80, 0x31, 0x4a, 0x8b, 0xf8, 0xa8, 0xfb, 0xc1,
0xc8, 0xb8, 0x88, 0x89, 0xd9, 0x3d, 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0xc4, 0xdc, 0x00, 0xa8,
0x52, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, 0x90, 0x96, 0x24, 0x36,
0xb0, 0x19, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xee, 0x36, 0x8d, 0xd8, 0x19, 0x02, 0x00,
0x00,
}

View File

@ -0,0 +1,118 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Wrappers for primitive (non-message) types. These types are useful
// for embedding primitives in the `google.protobuf.Any` type and for places
// where we need to distinguish between the absence of a primitive
// typed field and its default value.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/wrappers";
option java_package = "com.google.protobuf";
option java_outer_classname = "WrappersProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// Wrapper message for `double`.
//
// The JSON representation for `DoubleValue` is JSON number.
message DoubleValue {
// The double value.
double value = 1;
}
// Wrapper message for `float`.
//
// The JSON representation for `FloatValue` is JSON number.
message FloatValue {
// The float value.
float value = 1;
}
// Wrapper message for `int64`.
//
// The JSON representation for `Int64Value` is JSON string.
message Int64Value {
// The int64 value.
int64 value = 1;
}
// Wrapper message for `uint64`.
//
// The JSON representation for `UInt64Value` is JSON string.
message UInt64Value {
// The uint64 value.
uint64 value = 1;
}
// Wrapper message for `int32`.
//
// The JSON representation for `Int32Value` is JSON number.
message Int32Value {
// The int32 value.
int32 value = 1;
}
// Wrapper message for `uint32`.
//
// The JSON representation for `UInt32Value` is JSON number.
message UInt32Value {
// The uint32 value.
uint32 value = 1;
}
// Wrapper message for `bool`.
//
// The JSON representation for `BoolValue` is JSON `true` and `false`.
message BoolValue {
// The bool value.
bool value = 1;
}
// Wrapper message for `string`.
//
// The JSON representation for `StringValue` is JSON string.
message StringValue {
// The string value.
string value = 1;
}
// Wrapper message for `bytes`.
//
// The JSON representation for `BytesValue` is JSON string.
message BytesValue {
// The bytes value.
bytes value = 1;
}
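A hedged sketch of why the wrapper types matter: a pointer-to-wrapper field can distinguish "not set" from "explicitly zero", which a plain scalar field cannot. Settings and maxRetriesOrDefault below are hypothetical, not generated code from this commit.

package example

import "github.com/golang/protobuf/ptypes/wrappers"

// Settings stands in for a generated message with an optional numeric field.
type Settings struct {
	MaxRetries *wrappers.Int32Value
}

// maxRetriesOrDefault returns the configured value, or def when the field was
// never set (nil); an explicit zero is preserved as Value == 0.
func maxRetriesOrDefault(s *Settings, def int32) int32 {
	if s.MaxRetries == nil {
		return def
	}
	return s.MaxRetries.Value
}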

View File

@ -121,6 +121,9 @@ func (bo *Backoff) Pause() time.Duration {
if bo.Multiplier < 1 {
bo.Multiplier = 2
}
// Select a duration between zero and the current max. It might seem counterintuitive to
// have so much jitter, but https://www.awsarchitectureblog.com/2015/03/backoff.html
// argues that that is the best strategy.
d := time.Duration(rand.Int63n(int64(bo.cur)))
bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier)
if bo.cur > bo.Max {
@ -129,8 +132,21 @@ func (bo *Backoff) Pause() time.Duration {
return d
}
type grpcOpt []grpc.CallOption
func (o grpcOpt) Resolve(s *CallSettings) {
s.GRPC = o
}
func WithGRPCOptions(opt ...grpc.CallOption) CallOption {
return grpcOpt(append([]grpc.CallOption(nil), opt...))
}
type CallSettings struct {
// Retry returns a Retryer to be used to control retry logic of a method call.
// If Retry is nil or the returned Retryer is nil, the call will not be retried.
Retry func() Retryer
// CallOptions to be forwarded to GRPC.
GRPC []grpc.CallOption
}
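The jitter comment above amounts to "full jitter" backoff: sleep a uniformly random amount below the current ceiling, then grow the ceiling geometrically up to the max. A simplified standalone sketch (not the vendored gax implementation):

package example

import (
	"math/rand"
	"time"
)

// fullJitterPause picks a uniformly random pause in [0, cur) and returns the
// next ceiling, capped at max. cur must be positive.
func fullJitterPause(cur, max time.Duration, multiplier float64) (pause, next time.Duration) {
	pause = time.Duration(rand.Int63n(int64(cur)))
	next = time.Duration(float64(cur) * multiplier)
	if next > max {
		next = max
	}
	return pause, next
}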

24
vendor/github.com/googleapis/gax-go/header.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
package gax
import "bytes"
// XGoogHeader is for use by the Google Cloud Libraries only.
//
// XGoogHeader formats key-value pairs.
// The resulting string is suitable for the x-goog-api-client header.
func XGoogHeader(keyval ...string) string {
if len(keyval) == 0 {
return ""
}
if len(keyval)%2 != 0 {
panic("gax.Header: odd argument count")
}
var buf bytes.Buffer
for i := 0; i < len(keyval); i += 2 {
buf.WriteByte(' ')
buf.WriteString(keyval[i])
buf.WriteByte('/')
buf.WriteString(keyval[i+1])
}
return buf.String()[1:]
}
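Illustrative use of XGoogHeader with placeholder version strings (the values below are made up):

package example

import gax "github.com/googleapis/gax-go"

// xGoogExample shows the formatting: each key/value pair becomes "key/value",
// joined by single spaces.
func xGoogExample() string {
	return gax.XGoogHeader("gl-go", "go1.8", "gax", "0.1.0", "grpc", "1.3.0")
	// -> "gl-go/go1.8 gax/0.1.0 grpc/1.3.0"
}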

View File

@ -36,7 +36,7 @@ import (
)
// A user defined call stub.
type APICall func(context.Context) error
type APICall func(context.Context, CallSettings) error
// Invoke calls the given APICall,
// performing retries as specified by opts, if any.
@ -67,7 +67,7 @@ type sleeper func(ctx context.Context, d time.Duration) error
func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error {
var retryer Retryer
for {
err := call(ctx)
err := call(ctx, settings)
if err == nil {
return nil
}
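With the signature change above, call sites receive the per-call CallSettings and can forward settings.GRPC (populated by WithGRPCOptions) to the underlying stub. A hedged sketch; listSomething and stub are hypothetical:

package example

import (
	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// listSomething wraps a gRPC call in the new two-argument APICall shape.
func listSomething(ctx context.Context, stub func(context.Context, ...grpc.CallOption) error) error {
	call := func(ctx context.Context, settings gax.CallSettings) error {
		// Forward the per-call gRPC options resolved from the CallOptions.
		return stub(ctx, settings.GRPC...)
	}
	return gax.Invoke(ctx, call, gax.WithGRPCOptions(grpc.FailFast(false)))
}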

641
vendor/golang.org/x/net/http2/ciphers.go generated vendored Normal file
View File

@ -0,0 +1,641 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
// A list of the possible cipher suite ids. Taken from
// http://www.iana.org/assignments/tls-parameters/tls-parameters.txt
const (
cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
// Reserved uint16 = 0x001C-1D
cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
// Reserved uint16 = 0x0047-4F
// Reserved uint16 = 0x0050-58
// Reserved uint16 = 0x0059-5C
// Unassigned uint16 = 0x005D-5F
// Reserved uint16 = 0x0060-66
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
// Unassigned uint16 = 0x006E-83
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
// Unassigned uint16 = 0x00C6-FE
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
// Unassigned uint16 = 0x01-55,*
cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
// Unassigned uint16 = 0x5601 - 0xC000
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
// Unassigned uint16 = 0xC0B0-FF
// Unassigned uint16 = 0xC1-CB,*
// Unassigned uint16 = 0xCC00-A7
cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
)
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
// References:
// https://tools.ietf.org/html/rfc7540#appendix-A
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
func isBadCipher(cipher uint16) bool {
switch cipher {
case cipher_TLS_NULL_WITH_NULL_NULL,
cipher_TLS_RSA_WITH_NULL_MD5,
cipher_TLS_RSA_WITH_NULL_SHA,
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
cipher_TLS_RSA_WITH_RC4_128_MD5,
cipher_TLS_RSA_WITH_RC4_128_SHA,
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_RSA_WITH_DES_CBC_SHA,
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
cipher_TLS_DH_anon_WITH_RC4_128_MD5,
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_KRB5_WITH_DES_CBC_SHA,
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_KRB5_WITH_RC4_128_SHA,
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
cipher_TLS_KRB5_WITH_DES_CBC_MD5,
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
cipher_TLS_KRB5_WITH_RC4_128_MD5,
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
cipher_TLS_PSK_WITH_NULL_SHA,
cipher_TLS_DHE_PSK_WITH_NULL_SHA,
cipher_TLS_RSA_PSK_WITH_NULL_SHA,
cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_WITH_NULL_SHA256,
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_PSK_WITH_RC4_128_SHA,
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_WITH_SEED_CBC_SHA,
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_PSK_WITH_NULL_SHA256,
cipher_TLS_PSK_WITH_NULL_SHA384,
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDH_anon_WITH_NULL_SHA,
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_RSA_WITH_AES_128_CCM,
cipher_TLS_RSA_WITH_AES_256_CCM,
cipher_TLS_RSA_WITH_AES_128_CCM_8,
cipher_TLS_RSA_WITH_AES_256_CCM_8,
cipher_TLS_PSK_WITH_AES_128_CCM,
cipher_TLS_PSK_WITH_AES_256_CCM,
cipher_TLS_PSK_WITH_AES_128_CCM_8,
cipher_TLS_PSK_WITH_AES_256_CCM_8:
return true
default:
return false
}
}
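// Illustrative sketch, not part of the vendored file: inside this package,
// isBadCipher can screen a negotiated suite before an HTTP/2 connection is
// accepted. The helper name is hypothetical, and it assumes crypto/tls and
// fmt imports that ciphers.go itself does not pull in; the real acceptance
// check lives in server.go and can be relaxed via
// Server.PermitProhibitedCipherSuites.
func exampleRejectProhibitedCipher(state tls.ConnectionState) error {
	if isBadCipher(state.CipherSuite) {
		return fmt.Errorf("http2: prohibited TLS cipher suite %04x", state.CipherSuite)
	}
	return nil
}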

View File

@ -247,7 +247,7 @@ func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
}
// noDialClientConnPool is an implementation of http2.ClientConnPool
// which never dials. We let the HTTP/1.1 client dial and use its TLS
// connection instead.
type noDialClientConnPool struct{ *clientConnPool }

View File

@ -56,7 +56,7 @@ func configureTransport(t1 *http.Transport) (*Transport, error) {
}
// registerHTTPSProtocol calls Transport.RegisterProtocol but
// converting panics into errors.
func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
defer func() {
if e := recover(); e != nil {

146
vendor/golang.org/x/net/http2/databuffer.go generated vendored Normal file
View File

@ -0,0 +1,146 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"errors"
"fmt"
"sync"
)
// Buffer chunks are allocated from a pool to reduce pressure on GC.
// The maximum wasted space per dataBuffer is 2x the largest size class,
// which happens when the dataBuffer has multiple chunks and there is
// one unread byte in both the first and last chunks. We use a few size
// classes to minimize overheads for servers that typically receive very
// small request bodies.
//
// TODO: Benchmark to determine if the pools are necessary. The GC may have
// improved enough that we can instead allocate chunks like this:
// make([]byte, max(16<<10, expectedBytesRemaining))
var (
dataChunkSizeClasses = []int{
1 << 10,
2 << 10,
4 << 10,
8 << 10,
16 << 10,
}
dataChunkPools = [...]sync.Pool{
{New: func() interface{} { return make([]byte, 1<<10) }},
{New: func() interface{} { return make([]byte, 2<<10) }},
{New: func() interface{} { return make([]byte, 4<<10) }},
{New: func() interface{} { return make([]byte, 8<<10) }},
{New: func() interface{} { return make([]byte, 16<<10) }},
}
)
func getDataBufferChunk(size int64) []byte {
i := 0
for ; i < len(dataChunkSizeClasses)-1; i++ {
if size <= int64(dataChunkSizeClasses[i]) {
break
}
}
return dataChunkPools[i].Get().([]byte)
}
func putDataBufferChunk(p []byte) {
for i, n := range dataChunkSizeClasses {
if len(p) == n {
dataChunkPools[i].Put(p)
return
}
}
panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
}
// dataBuffer is an io.ReadWriter backed by a list of data chunks.
// Each dataBuffer is used to read DATA frames on a single stream.
// The buffer is divided into chunks so the server can limit the
// total memory used by a single connection without limiting the
// request body size on any single stream.
type dataBuffer struct {
chunks [][]byte
r int // next byte to read is chunks[0][r]
w int // next byte to write is chunks[len(chunks)-1][w]
size int // total buffered bytes
expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
}
var errReadEmpty = errors.New("read from empty dataBuffer")
// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *dataBuffer) Read(p []byte) (int, error) {
if b.size == 0 {
return 0, errReadEmpty
}
var ntotal int
for len(p) > 0 && b.size > 0 {
readFrom := b.bytesFromFirstChunk()
n := copy(p, readFrom)
p = p[n:]
ntotal += n
b.r += n
b.size -= n
// If the first chunk has been consumed, advance to the next chunk.
if b.r == len(b.chunks[0]) {
putDataBufferChunk(b.chunks[0])
end := len(b.chunks) - 1
copy(b.chunks[:end], b.chunks[1:])
b.chunks[end] = nil
b.chunks = b.chunks[:end]
b.r = 0
}
}
return ntotal, nil
}
func (b *dataBuffer) bytesFromFirstChunk() []byte {
if len(b.chunks) == 1 {
return b.chunks[0][b.r:b.w]
}
return b.chunks[0][b.r:]
}
// Len returns the number of bytes of the unread portion of the buffer.
func (b *dataBuffer) Len() int {
return b.size
}
// Write appends p to the buffer.
func (b *dataBuffer) Write(p []byte) (int, error) {
ntotal := len(p)
for len(p) > 0 {
// If the last chunk is empty, allocate a new chunk. Try to allocate
// enough to fully copy p plus any additional bytes we expect to
// receive. However, this may allocate less than len(p).
want := int64(len(p))
if b.expected > want {
want = b.expected
}
chunk := b.lastChunkOrAlloc(want)
n := copy(chunk[b.w:], p)
p = p[n:]
b.w += n
b.size += n
b.expected -= int64(n)
}
return ntotal, nil
}
func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
if len(b.chunks) != 0 {
last := b.chunks[len(b.chunks)-1]
if b.w < len(last) {
return last
}
}
chunk := getDataBufferChunk(want)
b.chunks = append(b.chunks, chunk)
b.w = 0
return chunk
}
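// Illustrative sketch, not part of the vendored file: expected in-package use
// of dataBuffer. Writes are copied into pooled chunks picked by size class;
// Read drains bytes in order and recycles a chunk only once it has been
// fully consumed.
func exampleDataBufferRoundTrip() []byte {
	var b dataBuffer
	b.expected = 3 << 10            // hint that ~3KB more will arrive, so a 4KB size class is chosen
	b.Write([]byte("hello, http2")) // copies into the first pooled chunk; the error is always nil
	out := make([]byte, b.Len())
	b.Read(out) // returns the buffered bytes; the chunk is kept until fully read
	return out
}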

View File

@ -1,60 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"errors"
)
// fixedBuffer is an io.ReadWriter backed by a fixed size buffer.
// It never allocates, but moves old data as new data is written.
type fixedBuffer struct {
buf []byte
r, w int
}
var (
errReadEmpty = errors.New("read from empty fixedBuffer")
errWriteFull = errors.New("write on full fixedBuffer")
)
// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *fixedBuffer) Read(p []byte) (n int, err error) {
if b.r == b.w {
return 0, errReadEmpty
}
n = copy(p, b.buf[b.r:b.w])
b.r += n
if b.r == b.w {
b.r = 0
b.w = 0
}
return n, nil
}
// Len returns the number of bytes of the unread portion of the buffer.
func (b *fixedBuffer) Len() int {
return b.w - b.r
}
// Write copies bytes from p into the buffer.
// It is an error to write more data than the buffer can hold.
func (b *fixedBuffer) Write(p []byte) (n int, err error) {
// Slide existing data to beginning.
if b.r > 0 && len(p) > len(b.buf)-b.w {
copy(b.buf, b.buf[b.r:b.w])
b.w -= b.r
b.r = 0
}
// Write new data.
n = copy(b.buf[b.w:], p)
b.w += n
if n < len(p) {
err = errWriteFull
}
return n, err
}

View File

@ -122,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{
// a frameParser parses a frame given its FrameHeader and payload
// bytes. The length of payload will always equal fh.Length (which
// might be 0).
type frameParser func(fh FrameHeader, payload []byte) (Frame, error)
type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error)
var frameParsers = map[FrameType]frameParser{
FrameData: parseDataFrame,
@ -312,7 +312,7 @@ type Framer struct {
MaxHeaderListSize uint32
// TODO: track which type of frame & with which flags was sent
// last. Then return an error (unless AllowIllegalWrites) if
// we're in the middle of a header block and a
// non-Continuation or Continuation on a different stream is
// attempted to be written.
@ -323,6 +323,8 @@ type Framer struct {
debugFramerBuf *bytes.Buffer
debugReadLoggerf func(string, ...interface{})
debugWriteLoggerf func(string, ...interface{})
frameCache *frameCache // nil if frames aren't reused (default)
}
func (fr *Framer) maxHeaderListSize() uint32 {
@ -398,6 +400,27 @@ const (
maxFrameSize = 1<<24 - 1
)
// SetReuseFrames allows the Framer to reuse Frames.
// If called on a Framer, Frames returned by calls to ReadFrame are only
// valid until the next call to ReadFrame.
func (fr *Framer) SetReuseFrames() {
if fr.frameCache != nil {
return
}
fr.frameCache = &frameCache{}
}
type frameCache struct {
dataFrame DataFrame
}
func (fc *frameCache) getDataFrame() *DataFrame {
if fc == nil {
return &DataFrame{}
}
return &fc.dataFrame
}
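// Illustrative sketch, not part of the vendored file: reading with frame
// reuse enabled. After SetReuseFrames, a *DataFrame returned by ReadFrame is
// only valid until the next ReadFrame call, so its payload must be consumed
// or copied first. handleData is a hypothetical callback.
func exampleReadWithReuse(w io.Writer, r io.Reader, handleData func([]byte)) error {
	fr := NewFramer(w, r)
	fr.SetReuseFrames() // DataFrames are now recycled between ReadFrame calls
	for {
		f, err := fr.ReadFrame()
		if err != nil {
			return err
		}
		if df, ok := f.(*DataFrame); ok {
			handleData(df.Data()) // copy inside handleData if the bytes must outlive this iteration
		}
	}
}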
// NewFramer returns a Framer that writes frames to w and reads them from r.
func NewFramer(w io.Writer, r io.Reader) *Framer {
fr := &Framer{
@ -477,7 +500,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
if _, err := io.ReadFull(fr.r, payload); err != nil {
return nil, err
}
f, err := typeFrameParser(fh.Type)(fh, payload)
f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload)
if err != nil {
if ce, ok := err.(connError); ok {
return nil, fr.connError(ce.Code, ce.Reason)
@ -565,7 +588,7 @@ func (f *DataFrame) Data() []byte {
return f.data
}
func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
if fh.StreamID == 0 {
// DATA frames MUST be associated with a stream. If a
// DATA frame is received whose stream identifier
@ -574,9 +597,9 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
// PROTOCOL_ERROR.
return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
}
f := &DataFrame{
FrameHeader: fh,
}
f := fc.getDataFrame()
f.FrameHeader = fh
var padSize byte
if fh.Flags.Has(FlagDataPadded) {
var err error
@ -600,6 +623,7 @@ var (
errStreamID = errors.New("invalid stream ID")
errDepStreamID = errors.New("invalid dependent stream ID")
errPadLength = errors.New("pad length too large")
errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled")
)
func validStreamIDOrZero(streamID uint32) bool {
@ -623,6 +647,7 @@ func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
//
// If pad is nil, the padding bit is not sent.
// The length of pad must not exceed 255 bytes.
// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility not to violate the maximum frame size
@ -631,8 +656,18 @@ func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []by
if !validStreamID(streamID) && !f.AllowIllegalWrites {
return errStreamID
}
if len(pad) > 255 {
return errPadLength
if len(pad) > 0 {
if len(pad) > 255 {
return errPadLength
}
if !f.AllowIllegalWrites {
for _, b := range pad {
if b != 0 {
// "Padding octets MUST be set to zero when sending."
return errPadBytes
}
}
}
}
var flags Flags
if endStream {
@ -660,10 +695,10 @@ type SettingsFrame struct {
p []byte
}
func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
// When this (ACK 0x1) bit is set, the payload of the
// SETTINGS frame MUST be empty. Receipt of a
// SETTINGS frame with the ACK flag set and a length
// field value other than 0 MUST be treated as a
// connection error (Section 5.4.1) of type
@ -672,7 +707,7 @@ func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
}
if fh.StreamID != 0 {
// SETTINGS frames always apply to a connection,
// never a single stream. The stream identifier for a
// SETTINGS frame MUST be zero (0x0). If an endpoint
// receives a SETTINGS frame whose stream identifier
// field is anything other than 0x0, the endpoint MUST
@ -762,7 +797,7 @@ type PingFrame struct {
func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {
func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
if len(payload) != 8 {
return nil, ConnectionError(ErrCodeFrameSize)
}
@ -802,7 +837,7 @@ func (f *GoAwayFrame) DebugData() []byte {
return f.debugData
}
func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) {
func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if fh.StreamID != 0 {
return nil, ConnectionError(ErrCodeProtocol)
}
@ -842,7 +877,7 @@ func (f *UnknownFrame) Payload() []byte {
return f.p
}
func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) {
func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
return &UnknownFrame{fh, p}, nil
}
@ -853,7 +888,7 @@ type WindowUpdateFrame struct {
Increment uint32 // never read with high bit set
}
func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if len(p) != 4 {
return nil, ConnectionError(ErrCodeFrameSize)
}
@ -918,12 +953,12 @@ func (f *HeadersFrame) HasPriority() bool {
return f.FrameHeader.Flags.Has(FlagHeadersPriority)
}
func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
hf := &HeadersFrame{
FrameHeader: fh,
}
if fh.StreamID == 0 {
// HEADERS frames MUST be associated with a stream. If a HEADERS frame
// is received whose stream identifier field is 0x0, the recipient MUST
// respond with a connection error (Section 5.4.1) of type
// PROTOCOL_ERROR.
@ -1045,7 +1080,7 @@ type PriorityParam struct {
Exclusive bool
// Weight is the stream's zero-indexed weight. It should be
// set together with StreamDep, or neither should be set. Per
// the spec, "Add one to the value to obtain a weight between
// 1 and 256."
Weight uint8
@ -1055,7 +1090,7 @@ func (p PriorityParam) IsZero() bool {
return p == PriorityParam{}
}
func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {
func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
if fh.StreamID == 0 {
return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
}
@ -1102,7 +1137,7 @@ type RSTStreamFrame struct {
ErrCode ErrCode
}
func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) {
func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if len(p) != 4 {
return nil, ConnectionError(ErrCodeFrameSize)
}
@ -1132,7 +1167,7 @@ type ContinuationFrame struct {
headerFragBuf []byte
}
func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {
func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if fh.StreamID == 0 {
return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
}
@ -1182,7 +1217,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool {
return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
}
func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) {
func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
pp := &PushPromiseFrame{
FrameHeader: fh,
}

View File

@ -7,7 +7,6 @@
package http2
import (
"crypto/tls"
"net/http"
"time"
)
@ -15,29 +14,3 @@ import (
func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
return t1.ExpectContinueTimeout
}
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
func isBadCipher(cipher uint16) bool {
switch cipher {
case tls.TLS_RSA_WITH_RC4_128_SHA,
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
return true
default:
return false
}
}

View File

@ -12,7 +12,11 @@ import (
"net/http"
)
func cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() }
func cloneTLSConfig(c *tls.Config) *tls.Config {
c2 := c.Clone()
c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264
return c2
}
var _ http.Pusher = (*responseWriter)(nil)

16
vendor/golang.org/x/net/http2/go19.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.9
package http2
import (
"net/http"
)
func configureServer19(s *http.Server, conf *Server) error {
s.RegisterOnShutdown(conf.state.startGracefulShutdown)
return nil
}
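// Illustrative sketch, not part of the vendored file, assuming Go 1.9+: once
// ConfigureServer has registered the graceful-shutdown hook above,
// (*http.Server).Shutdown also drains active HTTP/2 connections via GOAWAY.
// The handler and certificate paths below are hypothetical.
//
//	srv := &http.Server{Addr: ":8443", Handler: handler}
//	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
//		log.Fatal(err)
//	}
//	go srv.ListenAndServeTLS("cert.pem", "key.pem")
//	// ... on shutdown signal:
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	srv.Shutdown(ctx) // runs startGracefulShutdown for every active conn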

View File

@ -7,7 +7,6 @@
package http2
import (
"crypto/tls"
"net/http"
"time"
)
@ -20,27 +19,3 @@ func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
return 0
}
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
func isBadCipher(cipher uint16) bool {
switch cipher {
case tls.TLS_RSA_WITH_RC4_128_SHA,
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
return true
default:
return false
}
}

16
vendor/golang.org/x/net/http2/not_go19.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.9
package http2
import (
"net/http"
)
func configureServer19(s *http.Server, conf *Server) error {
// not supported prior to go1.9
return nil
}

View File

@ -10,13 +10,13 @@ import (
"sync"
)
// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
// io.Pipe except there are no PipeReader/PipeWriter halves, and the
// underlying buffer is an interface. (io.Pipe is always unbuffered)
type pipe struct {
mu sync.Mutex
c sync.Cond // c.L lazily initialized to &p.mu
b pipeBuffer
b pipeBuffer // nil when done reading
err error // read error once empty. non-nil means closed.
breakErr error // immediate read error (caller doesn't see rest of b)
donec chan struct{} // closed on error
@ -32,6 +32,9 @@ type pipeBuffer interface {
func (p *pipe) Len() int {
p.mu.Lock()
defer p.mu.Unlock()
if p.b == nil {
return 0
}
return p.b.Len()
}
@ -47,7 +50,7 @@ func (p *pipe) Read(d []byte) (n int, err error) {
if p.breakErr != nil {
return 0, p.breakErr
}
if p.b.Len() > 0 {
if p.b != nil && p.b.Len() > 0 {
return p.b.Read(d)
}
if p.err != nil {
@ -55,6 +58,7 @@ func (p *pipe) Read(d []byte) (n int, err error) {
p.readFn() // e.g. copy trailers
p.readFn = nil // not sticky like p.err
}
p.b = nil
return 0, p.err
}
p.c.Wait()
@ -75,6 +79,9 @@ func (p *pipe) Write(d []byte) (n int, err error) {
if p.err != nil {
return 0, errClosedPipeWrite
}
if p.breakErr != nil {
return len(d), nil // discard when there is no reader
}
return p.b.Write(d)
}
@ -109,6 +116,9 @@ func (p *pipe) closeWithError(dst *error, err error, fn func()) {
return
}
p.readFn = fn
if dst == &p.breakErr {
p.b = nil
}
*dst = err
p.closeDoneLocked()
}
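// Illustrative sketch, not part of the vendored file: in-package behavior of
// pipe backed by a dataBuffer. A Read that finds buffered data returns at
// once; after closeWithError sets breakErr, the buffer is dropped and later
// Writes are silently discarded.
func examplePipeRoundTrip() []byte {
	p := &pipe{b: &dataBuffer{}}
	p.Write([]byte("hello")) // buffered; the error is nil while the pipe is open
	out := make([]byte, p.Len())
	p.Read(out) // returns immediately because data is already buffered
	return out
}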

View File

@ -110,9 +110,41 @@ type Server struct {
// activity for the purposes of IdleTimeout.
IdleTimeout time.Duration
// MaxUploadBufferPerConnection is the size of the initial flow
// control window for each connection. The HTTP/2 spec does not
// allow this to be smaller than 65535 or larger than 2^32-1.
// If the value is outside this range, a default value will be
// used instead.
MaxUploadBufferPerConnection int32
// MaxUploadBufferPerStream is the size of the initial flow control
// window for each stream. The HTTP/2 spec does not allow this to
// be larger than 2^32-1. If the value is zero or larger than the
// maximum, a default value will be used instead.
MaxUploadBufferPerStream int32
// NewWriteScheduler constructs a write scheduler for a connection.
// If nil, a default scheduler is chosen.
NewWriteScheduler func() WriteScheduler
// Internal state. This is a pointer (rather than embedded directly)
// so that we don't embed a Mutex in this struct, which will make the
// struct non-copyable, which might break some callers.
state *serverInternalState
}
func (s *Server) initialConnRecvWindowSize() int32 {
if s.MaxUploadBufferPerConnection > initialWindowSize {
return s.MaxUploadBufferPerConnection
}
return 1 << 20
}
func (s *Server) initialStreamRecvWindowSize() int32 {
if s.MaxUploadBufferPerStream > 0 {
return s.MaxUploadBufferPerStream
}
return 1 << 20
}
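// Illustrative sketch, not part of the vendored file: configuring the new
// upload buffer knobs. The values are examples only; per the field docs
// above, out-of-range values fall back to the defaults (1<<20 here).
//
//	h2 := &http2.Server{
//		MaxUploadBufferPerConnection: 1 << 20, // connection-level receive window
//		MaxUploadBufferPerStream:     1 << 20, // per-stream receive window
//	}
//	if err := http2.ConfigureServer(httpSrv, h2); err != nil { // httpSrv: an existing *http.Server
//		log.Fatal(err)
//	}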
func (s *Server) maxReadFrameSize() uint32 {
@ -129,6 +161,40 @@ func (s *Server) maxConcurrentStreams() uint32 {
return defaultMaxStreams
}
type serverInternalState struct {
mu sync.Mutex
activeConns map[*serverConn]struct{}
}
func (s *serverInternalState) registerConn(sc *serverConn) {
if s == nil {
return // if the Server was used without calling ConfigureServer
}
s.mu.Lock()
s.activeConns[sc] = struct{}{}
s.mu.Unlock()
}
func (s *serverInternalState) unregisterConn(sc *serverConn) {
if s == nil {
return // if the Server was used without calling ConfigureServer
}
s.mu.Lock()
delete(s.activeConns, sc)
s.mu.Unlock()
}
func (s *serverInternalState) startGracefulShutdown() {
if s == nil {
return // if the Server was used without calling ConfigureServer
}
s.mu.Lock()
for sc := range s.activeConns {
sc.startGracefulShutdown()
}
s.mu.Unlock()
}
// ConfigureServer adds HTTP/2 support to a net/http Server.
//
// The configuration conf may be nil.
@ -141,9 +207,13 @@ func ConfigureServer(s *http.Server, conf *Server) error {
if conf == nil {
conf = new(Server)
}
conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
if err := configureServer18(s, conf); err != nil {
return err
}
if err := configureServer19(s, conf); err != nil {
return err
}
if s.TLSConfig == nil {
s.TLSConfig = new(tls.Config)
@ -255,35 +325,37 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
defer cancel()
sc := &serverConn{
srv: s,
hs: opts.baseConfig(),
conn: c,
baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(),
bw: newBufferedWriter(c),
handler: opts.handler(),
streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult),
wantWriteFrameCh: make(chan FrameWriteRequest, 8),
wantStartPushCh: make(chan startPushRequest, 8),
wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
doneServing: make(chan struct{}),
clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
advMaxStreams: s.maxConcurrentStreams(),
initialWindowSize: initialWindowSize,
maxFrameSize: initialMaxFrameSize,
headerTableSize: initialHeaderTableSize,
serveG: newGoroutineLock(),
pushEnabled: true,
srv: s,
hs: opts.baseConfig(),
conn: c,
baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(),
bw: newBufferedWriter(c),
handler: opts.handler(),
streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult),
wantWriteFrameCh: make(chan FrameWriteRequest, 8),
serveMsgCh: make(chan interface{}, 8),
wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
doneServing: make(chan struct{}),
clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
advMaxStreams: s.maxConcurrentStreams(),
initialStreamSendWindowSize: initialWindowSize,
maxFrameSize: initialMaxFrameSize,
headerTableSize: initialHeaderTableSize,
serveG: newGoroutineLock(),
pushEnabled: true,
}
s.state.registerConn(sc)
defer s.state.unregisterConn(sc)
// The net/http package sets the write deadline from the
// http.Server.WriteTimeout during the TLS handshake, but then
// passes the connection off to us with the deadline already
// set. Disarm it here so that it is not applied to additional
// streams opened on this connection.
// TODO: implement WriteTimeout fully. See Issue 18437.
// passes the connection off to us with the deadline already set.
// Write deadlines are set per stream in serverConn.newStream.
// Disarm the net.Conn write deadline here.
if sc.hs.WriteTimeout != 0 {
sc.conn.SetWriteDeadline(time.Time{})
}
@ -294,6 +366,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
sc.writeSched = NewRandomWriteScheduler()
}
// These start at the RFC-specified defaults. If there is a higher
// configured value for inflow, that will be updated when we send a
// WINDOW_UPDATE shortly after sending SETTINGS.
sc.flow.add(initialWindowSize)
sc.inflow.add(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
@ -376,10 +451,9 @@ type serverConn struct {
doneServing chan struct{} // closed when serverConn.serve ends
readFrameCh chan readFrameResult // written by serverConn.readFrames
wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
wantStartPushCh chan startPushRequest // from handlers -> serve
wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
bodyReadCh chan bodyReadMsg // from handlers -> serve
testHookCh chan func(int) // code to run on the serve loop
serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop
flow flow // conn-wide (not stream-specific) outbound flow control
inflow flow // conn-wide inbound flow control
tlsState *tls.ConnectionState // shared by all handlers, like net/http
@ -387,38 +461,39 @@ type serverConn struct {
writeSched WriteScheduler
// Everything following is owned by the serve loop; use serveG.check():
serveG goroutineLock // used to verify funcs are on serve()
pushEnabled bool
sawFirstSettings bool // got the initial SETTINGS frame after the preface
needToSendSettingsAck bool
unackedSettings int // how many SETTINGS have we sent without ACKs?
clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
curClientStreams uint32 // number of open streams initiated by the client
curPushedStreams uint32 // number of open streams initiated by server push
maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
streams map[uint32]*stream
initialWindowSize int32
maxFrameSize int32
headerTableSize uint32
peerMaxHeaderListSize uint32 // zero means unknown (default)
canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
writingFrame bool // started writing a frame (on serve goroutine or separate)
writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
needsFrameFlush bool // last frame write wasn't a flush
inGoAway bool // we've started to or sent GOAWAY
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
needToSendGoAway bool // we need to schedule a GOAWAY frame write
goAwayCode ErrCode
shutdownTimerCh <-chan time.Time // nil until used
shutdownTimer *time.Timer // nil until used
idleTimer *time.Timer // nil if unused
idleTimerCh <-chan time.Time // nil if unused
serveG goroutineLock // used to verify funcs are on serve()
pushEnabled bool
sawFirstSettings bool // got the initial SETTINGS frame after the preface
needToSendSettingsAck bool
unackedSettings int // how many SETTINGS have we sent without ACKs?
clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
curClientStreams uint32 // number of open streams initiated by the client
curPushedStreams uint32 // number of open streams initiated by server push
maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
streams map[uint32]*stream
initialStreamSendWindowSize int32
maxFrameSize int32
headerTableSize uint32
peerMaxHeaderListSize uint32 // zero means unknown (default)
canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
writingFrame bool // started writing a frame (on serve goroutine or separate)
writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
needsFrameFlush bool // last frame write wasn't a flush
inGoAway bool // we've started to or sent GOAWAY
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
needToSendGoAway bool // we need to schedule a GOAWAY frame write
goAwayCode ErrCode
shutdownTimer *time.Timer // nil until used
idleTimer *time.Timer // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
hpackEncoder *hpack.Encoder
// Used by startGracefulShutdown.
shutdownOnce sync.Once
}
func (sc *serverConn) maxHeaderListSize() uint32 {
@ -463,10 +538,10 @@ type stream struct {
numTrailerValues int64
weight uint8
state streamState
resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
gotTrailerHeader bool // HEADER frame for trailers was seen
wroteHeaders bool // whether we wrote headers (not status 100)
reqBuf []byte // if non-nil, body pipe buffer to return later at EOF
resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
gotTrailerHeader bool // HEADER frame for trailers was seen
wroteHeaders bool // whether we wrote headers (not status 100)
writeDeadline *time.Timer // nil if unused
trailer http.Header // accumulated trailers
reqTrailer http.Header // handler's Request.Trailer
@ -696,48 +771,48 @@ func (sc *serverConn) serve() {
{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
{SettingMaxConcurrentStreams, sc.advMaxStreams},
{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
// TODO: more actual settings, notably
// SettingInitialWindowSize, but then we also
// want to bump up the conn window size the
// same amount here right after the settings
{SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
},
})
sc.unackedSettings++
// Each connection starts with initialWindowSize inflow tokens.
// If a higher value is configured, we add more tokens.
if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
sc.sendWindowUpdate(nil, int(diff))
}
if err := sc.readPreface(); err != nil {
sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
return
}
// Now that we've got the preface, get us out of the
// "StateNew" state. We can't go directly to idle, though.
// "StateNew" state. We can't go directly to idle, though.
// Active means we read some data and anticipate a request. We'll
// do another Active when we get a HEADERS frame.
sc.setConnState(http.StateActive)
sc.setConnState(http.StateIdle)
if sc.srv.IdleTimeout != 0 {
sc.idleTimer = time.NewTimer(sc.srv.IdleTimeout)
sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
defer sc.idleTimer.Stop()
sc.idleTimerCh = sc.idleTimer.C
}
var gracefulShutdownCh <-chan struct{}
if sc.hs != nil {
gracefulShutdownCh = h1ServerShutdownChan(sc.hs)
}
go sc.readFrames() // closed by defer sc.conn.Close above
settingsTimer := time.NewTimer(firstSettingsTimeout)
settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop()
loopNum := 0
for {
loopNum++
select {
case wr := <-sc.wantWriteFrameCh:
if se, ok := wr.write.(StreamError); ok {
sc.resetStream(se)
break
}
sc.writeFrame(wr)
case spr := <-sc.wantStartPushCh:
sc.startPush(spr)
case res := <-sc.wroteFrameCh:
sc.wroteFrame(res)
case res := <-sc.readFrameCh:
@ -745,26 +820,37 @@ func (sc *serverConn) serve() {
return
}
res.readMore()
if settingsTimer.C != nil {
if settingsTimer != nil {
settingsTimer.Stop()
settingsTimer.C = nil
settingsTimer = nil
}
case m := <-sc.bodyReadCh:
sc.noteBodyRead(m.st, m.n)
case <-settingsTimer.C:
sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
return
case <-gracefulShutdownCh:
gracefulShutdownCh = nil
sc.startGracefulShutdown()
case <-sc.shutdownTimerCh:
sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
return
case <-sc.idleTimerCh:
sc.vlogf("connection is idle")
sc.goAway(ErrCodeNo)
case fn := <-sc.testHookCh:
fn(loopNum)
case msg := <-sc.serveMsgCh:
switch v := msg.(type) {
case func(int):
v(loopNum) // for testing
case *serverMessage:
switch v {
case settingsTimerMsg:
sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
return
case idleTimerMsg:
sc.vlogf("connection is idle")
sc.goAway(ErrCodeNo)
case shutdownTimerMsg:
sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
return
case gracefulShutdownMsg:
sc.startGracefulShutdownInternal()
default:
panic("unknown timer")
}
case *startPushRequest:
sc.startPush(v)
default:
panic(fmt.Sprintf("unexpected type %T", v))
}
}
if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame {
@ -773,6 +859,36 @@ func (sc *serverConn) serve() {
}
}
func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
select {
case <-sc.doneServing:
case <-sharedCh:
close(privateCh)
}
}
type serverMessage int
// Message values sent to serveMsgCh.
var (
settingsTimerMsg = new(serverMessage)
idleTimerMsg = new(serverMessage)
shutdownTimerMsg = new(serverMessage)
gracefulShutdownMsg = new(serverMessage)
)
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) }
func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
func (sc *serverConn) sendServeMsg(msg interface{}) {
sc.serveG.checkNotOn() // NOT
select {
case sc.serveMsgCh <- msg:
case <-sc.doneServing:
}
}
// readPreface reads the ClientPreface greeting from the peer
// or returns an error on timeout or an invalid greeting.
func (sc *serverConn) readPreface() error {
@ -1014,7 +1130,11 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
// stateClosed after the RST_STREAM frame is
// written.
st.state = stateHalfClosedLocal
sc.resetStream(streamError(st.id, ErrCodeCancel))
// Section 8.1: a server MAY request that the client abort
// transmission of a request without error by sending a
// RST_STREAM with an error code of NO_ERROR after sending
// a complete response.
sc.resetStream(streamError(st.id, ErrCodeNo))
case stateHalfClosedRemote:
sc.closeStream(st, errHandlerComplete)
}
@ -1086,10 +1206,19 @@ func (sc *serverConn) scheduleFrameWrite() {
sc.inFrameScheduleLoop = false
}
// startGracefulShutdown sends a GOAWAY with ErrCodeNo to tell the
// client we're gracefully shutting down. The connection isn't closed
// until all current streams are done.
// startGracefulShutdown gracefully shuts down a connection. This
// sends GOAWAY with ErrCodeNo to tell the client we're gracefully
// shutting down. The connection isn't closed until all current
// streams are done.
//
// startGracefulShutdown returns immediately; it does not wait until
// the connection has shut down.
func (sc *serverConn) startGracefulShutdown() {
sc.serveG.checkNotOn() // NOT
sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) })
}
func (sc *serverConn) startGracefulShutdownInternal() {
sc.goAwayIn(ErrCodeNo, 0)
}
@ -1121,8 +1250,7 @@ func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) {
func (sc *serverConn) shutDownIn(d time.Duration) {
sc.serveG.check()
sc.shutdownTimer = time.NewTimer(d)
sc.shutdownTimerCh = sc.shutdownTimer.C
sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
}
func (sc *serverConn) resetStream(se StreamError) {
@ -1305,6 +1433,9 @@ func (sc *serverConn) closeStream(st *stream, err error) {
panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
}
st.state = stateClosed
if st.writeDeadline != nil {
st.writeDeadline.Stop()
}
if st.isPushed() {
sc.curPushedStreams--
} else {
@ -1317,7 +1448,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
sc.idleTimer.Reset(sc.srv.IdleTimeout)
}
if h1ServerKeepAlivesDisabled(sc.hs) {
sc.startGracefulShutdown()
sc.startGracefulShutdownInternal()
}
}
if p := st.body; p != nil {
@ -1395,9 +1526,9 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
// adjust the size of all stream flow control windows that it
// maintains by the difference between the new value and the
// old value."
old := sc.initialWindowSize
sc.initialWindowSize = int32(val)
growth := sc.initialWindowSize - old // may be negative
old := sc.initialStreamSendWindowSize
sc.initialStreamSendWindowSize = int32(val)
growth := int32(val) - old // may be negative
for _, st := range sc.streams {
if !st.flow.add(growth) {
// 6.9.2 Initial Flow Control Window Size
@ -1504,7 +1635,7 @@ func (sc *serverConn) processGoAway(f *GoAwayFrame) error {
} else {
sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
}
sc.startGracefulShutdown()
sc.startGracefulShutdownInternal()
// http://tools.ietf.org/html/rfc7540#section-6.8
// We should not create any new streams, which means we should disable push.
sc.pushEnabled = false
@ -1543,6 +1674,12 @@ func (st *stream) copyTrailersToHandlerRequest() {
}
}
// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
// when the stream's WriteTimeout has fired.
func (st *stream) onWriteTimeout() {
st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)})
}
func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
sc.serveG.check()
id := f.StreamID
@ -1719,9 +1856,12 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
}
st.cw.Init()
st.flow.conn = &sc.flow // link to conn-level counter
st.flow.add(sc.initialWindowSize)
st.inflow.conn = &sc.inflow // link to conn-level counter
st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings
st.flow.add(sc.initialStreamSendWindowSize)
st.inflow.conn = &sc.inflow // link to conn-level counter
st.inflow.add(sc.srv.initialStreamRecvWindowSize())
if sc.hs.WriteTimeout != 0 {
st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
sc.streams[id] = st
sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})
@ -1785,16 +1925,14 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
return nil, nil, err
}
if bodyOpen {
st.reqBuf = getRequestBodyBuf()
req.Body.(*requestBody).pipe = &pipe{
b: &fixedBuffer{buf: st.reqBuf},
}
if vv, ok := rp.header["Content-Length"]; ok {
req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
} else {
req.ContentLength = -1
}
req.Body.(*requestBody).pipe = &pipe{
b: &dataBuffer{expected: req.ContentLength},
}
}
return rw, req, nil
}
@ -1890,24 +2028,6 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
return rw, req, nil
}
var reqBodyCache = make(chan []byte, 8)
func getRequestBodyBuf() []byte {
select {
case b := <-reqBodyCache:
return b
default:
return make([]byte, initialWindowSize)
}
}
func putRequestBodyBuf(b []byte) {
select {
case reqBodyCache <- b:
default:
}
}
// Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
didPanic := true
@ -2003,12 +2123,6 @@ func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
case <-sc.doneServing:
}
}
if err == io.EOF {
if buf := st.reqBuf; buf != nil {
st.reqBuf = nil // shouldn't matter; field unused by other
putRequestBodyBuf(buf)
}
}
}
func (sc *serverConn) noteBodyRead(st *stream, n int) {
@ -2103,8 +2217,8 @@ func (b *requestBody) Read(p []byte) (n int, err error) {
return
}
// responseWriter is the http.ResponseWriter implementation. It's
// intentionally small (1 pointer wide) to minimize garbage. The
// responseWriter is the http.ResponseWriter implementation. It's
// intentionally small (1 pointer wide) to minimize garbage. The
// responseWriterState pointer inside is zeroed at the end of a
// request (in handlerDone) and calls on the responseWriter thereafter
// simply crash (caller's mistake), but the much larger responseWriterState
@ -2278,7 +2392,7 @@ const TrailerPrefix = "Trailer:"
// says you SHOULD (but not must) predeclare any trailers in the
// header, the official ResponseWriter rules said trailers in Go must
// be predeclared, and then we reuse the same ResponseWriter.Header()
// map to mean both Headers and Trailers. When it's time to write the
// map to mean both Headers and Trailers. When it's time to write the
// Trailers, we pick out the fields of Headers that were declared as
// trailers. That worked for a while, until we found the first major
// user of Trailers in the wild: gRPC (using them only over http2),
@ -2514,7 +2628,7 @@ func (w *responseWriter) push(target string, opts pushOptions) error {
return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
}
msg := startPushRequest{
msg := &startPushRequest{
parent: st,
method: opts.Method,
url: u,
@ -2527,7 +2641,7 @@ func (w *responseWriter) push(target string, opts pushOptions) error {
return errClientDisconnected
case <-st.cw:
return errStreamClosed
case sc.wantStartPushCh <- msg:
case sc.serveMsgCh <- msg:
}
select {
@ -2549,7 +2663,7 @@ type startPushRequest struct {
done chan error
}
func (sc *serverConn) startPush(msg startPushRequest) {
func (sc *serverConn) startPush(msg *startPushRequest) {
sc.serveG.check()
// http://tools.ietf.org/html/rfc7540#section-6.6.
@ -2588,7 +2702,7 @@ func (sc *serverConn) startPush(msg startPushRequest) {
// A server that is unable to establish a new stream identifier can send a GOAWAY
// frame so that the client is forced to open a new connection for new streams.
if sc.maxPushPromiseID+2 >= 1<<31 {
sc.startGracefulShutdown()
sc.startGracefulShutdownInternal()
return 0, ErrPushLimitReached
}
sc.maxPushPromiseID += 2
@ -2713,31 +2827,6 @@ var badTrailer = map[string]bool{
"Www-Authenticate": true,
}
// h1ServerShutdownChan returns a channel that will be closed when the
// provided *http.Server wants to shut down.
//
// This is a somewhat hacky way to get at http1 innards. It works
// when the http2 code is bundled into the net/http package in the
// standard library. The alternatives ended up making the cmd/go tool
// depend on http Servers. This is the lightest option for now.
// This is tested via the TestServeShutdown* tests in net/http.
func h1ServerShutdownChan(hs *http.Server) <-chan struct{} {
if fn := testh1ServerShutdownChan; fn != nil {
return fn(hs)
}
var x interface{} = hs
type I interface {
getDoneChan() <-chan struct{}
}
if hs, ok := x.(I); ok {
return hs.getDoneChan()
}
return nil
}
// optional test hook for h1ServerShutdownChan.
var testh1ServerShutdownChan func(hs *http.Server) <-chan struct{}
// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
// disabled. See comments on h1ServerShutdownChan above for why
// the code is written this way.

View File

@ -575,7 +575,7 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool {
cc.nextStreamID < math.MaxInt32
}
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
// only be called when we're idle, but because we're coming from a new
// goroutine, there could be a new request coming in at the same time,
// so this simply calls the synchronized closeIfIdle to shut down this
@ -809,8 +809,8 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
// 2xx, however, then assume the server DOES potentially
// want our body (e.g. full-duplex streaming:
// golang.org/issue/13444). If it turns out the server
// doesn't, they'll RST_STREAM us soon enough. This is a
// heuristic to avoid adding knobs to Transport. Hopefully
// doesn't, they'll RST_STREAM us soon enough. This is a
// heuristic to avoid adding knobs to Transport. Hopefully
// we can keep it.
bodyWriter.cancel()
cs.abortRequestBodyWrite(errStopReqBodyWrite)
@ -1528,8 +1528,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
return res, nil
}
buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage
cs.bufPipe = pipe{b: buf}
cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}}
cs.bytesRemain = res.ContentLength
res.Body = transportResponseBody{cs}
go cs.awaitRequestCancel(cs.req)
@ -1656,6 +1655,7 @@ func (b transportResponseBody) Close() error {
cc.wmu.Lock()
if !serverSentStreamEnd {
cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
cs.didReset = true
}
// Return connection-level flow control.
if unread > 0 {
@ -1703,12 +1703,6 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
return nil
}
if f.Length > 0 {
if len(data) > 0 && cs.bufPipe.b == nil {
// Data frame after it's already closed?
cc.logf("http2: Transport received DATA frame for closed stream; closing connection")
return ConnectionError(ErrCodeProtocol)
}
// Check connection-level flow control.
cc.mu.Lock()
if cs.inflow.available() >= int32(f.Length) {

View File

@ -53,7 +53,7 @@ type PriorityWriteSchedulerConfig struct {
}
// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
// frames by following HTTP/2 priorities as described in RFC 7340 Section 5.3.
// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
// If cfg is nil, default options are used.
func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
if cfg == nil {

View File

@ -0,0 +1,38 @@
// Copyright 2012 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package transport contains HTTP transports used to make
// authenticated API requests.
package transport
import (
"errors"
"net/http"
)
// APIKey is an HTTP Transport which wraps an underlying transport and
// appends an API Key "key" parameter to the URL of outgoing requests.
type APIKey struct {
// Key is the API Key to set on requests.
Key string
// Transport is the underlying HTTP transport.
// If nil, http.DefaultTransport is used.
Transport http.RoundTripper
}
func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) {
rt := t.Transport
if rt == nil {
rt = http.DefaultTransport
if rt == nil {
return nil, errors.New("googleapi/transport: no Transport specified or available")
}
}
newReq := *req
args := newReq.URL.Query()
args.Set("key", t.Key)
newReq.URL.RawQuery = args.Encode()
return rt.RoundTrip(&newReq)
}
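
A minimal usage sketch for the APIKey transport above, assuming only what the file itself defines: the key and request URL are placeholders, and Transport is left nil so http.DefaultTransport is used.

package main

import (
    "fmt"
    "net/http"

    "google.golang.org/api/googleapi/transport"
)

func main() {
    client := &http.Client{
        Transport: &transport.APIKey{
            Key: "YOUR_API_KEY", // placeholder; supply a real API key
            // Transport is nil, so http.DefaultTransport is used.
        },
    }
    resp, err := client.Get("https://www.googleapis.com/discovery/v1/apis") // placeholder request
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.Status)
}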

59
vendor/google.golang.org/api/internal/pool.go generated vendored Normal file
View File

@ -0,0 +1,59 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"errors"
"google.golang.org/grpc/naming"
)
// PoolResolver provides a fixed list of addresses to load balance between
// and does not provide further updates.
type PoolResolver struct {
poolSize int
dialOpt *DialSettings
ch chan []*naming.Update
}
// NewPoolResolver returns a PoolResolver
// This is an EXPERIMENTAL API and may be changed or removed in the future.
func NewPoolResolver(size int, o *DialSettings) *PoolResolver {
return &PoolResolver{poolSize: size, dialOpt: o}
}
// Resolve returns a Watcher for the endpoint defined by the DialSettings
// provided to NewPoolResolver.
func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) {
if r.dialOpt.Endpoint == "" {
return nil, errors.New("No endpoint configured")
}
addrs := make([]*naming.Update, 0, r.poolSize)
for i := 0; i < r.poolSize; i++ {
addrs = append(addrs, &naming.Update{Op: naming.Add, Addr: r.dialOpt.Endpoint, Metadata: i})
}
r.ch = make(chan []*naming.Update, 1)
r.ch <- addrs
return r, nil
}
// Next returns a static list of updates on the first call,
// and blocks indefinitely until Close is called on subsequent calls.
func (r *PoolResolver) Next() ([]*naming.Update, error) {
return <-r.ch, nil
}
func (r *PoolResolver) Close() {
close(r.ch)
}
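
A minimal sketch of how the resolver behaves, assuming it is exercised from within this same internal package (Go's internal-package rule prevents importing it from outside google.golang.org/api); the endpoint value is a placeholder. Resolve returns the resolver itself as a naming.Watcher, and the first Next call yields poolSize copies of the endpoint for the round-robin balancer.

package internal

import "fmt"

func examplePoolResolver() {
    r := NewPoolResolver(4, &DialSettings{Endpoint: "example.googleapis.com:443"}) // placeholder endpoint
    w, err := r.Resolve("ignored") // the target is unused; the address list is fixed
    if err != nil {
        fmt.Println("resolve:", err)
        return
    }
    updates, _ := w.Next()                     // first call returns the static address list
    fmt.Println(len(updates), updates[0].Addr) // 4 example.googleapis.com:443
    r.Close()
}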

37
vendor/google.golang.org/api/internal/settings.go generated vendored Normal file
View File

@ -0,0 +1,37 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package internal supports the options and transport packages.
package internal
import (
"net/http"
"golang.org/x/oauth2"
"google.golang.org/grpc"
)
// DialSettings holds information needed to establish a connection with a
// Google API service.
type DialSettings struct {
Endpoint string
Scopes []string
ServiceAccountJSONFilename string // if set, TokenSource is ignored.
TokenSource oauth2.TokenSource
UserAgent string
APIKey string
HTTPClient *http.Client
GRPCDialOpts []grpc.DialOption
GRPCConn *grpc.ClientConn
}

231
vendor/google.golang.org/api/iterator/iterator.go generated vendored Normal file
View File

@ -0,0 +1,231 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package iterator provides support for standard Google API iterators.
// See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines.
package iterator
import (
"errors"
"fmt"
"reflect"
)
// Done is returned by an iterator's Next method when the iteration is
// complete; when there are no more items to return.
var Done = errors.New("no more items in iterator")
// We don't support mixed calls to Next and NextPage because they play
// with the paging state in incompatible ways.
var errMixed = errors.New("iterator: Next and NextPage called on same iterator")
// PageInfo contains information about an iterator's paging state.
type PageInfo struct {
// Token is the token used to retrieve the next page of items from the
// API. You may set Token immediately after creating an iterator to
// begin iteration at a particular point. If Token is the empty string,
// the iterator will begin with the first eligible item.
//
// The result of setting Token after the first call to Next is undefined.
//
// After the underlying API method is called to retrieve a page of items,
// Token is set to the next-page token in the response.
Token string
// MaxSize is the maximum number of items returned by a call to the API.
// Set MaxSize as a hint to optimize the buffering behavior of the iterator.
// If zero, the page size is determined by the underlying service.
//
// Use Pager to retrieve a page of a specific, exact size.
MaxSize int
// The error state of the iterator. Manipulated by PageInfo.next and Pager.
// This is a latch: it starts as nil, and once set should never change.
err error
// If true, no more calls to fetch should be made. Set to true when fetch
// returns an empty page token. The iterator is Done when this is true AND
// the buffer is empty.
atEnd bool
// Function that fetches a page from the underlying service. It should pass
// the pageSize and pageToken arguments to the service, fill the buffer
// with the results from the call, and return the next-page token returned
// by the service. The function must not remove any existing items from the
// buffer. If the underlying RPC takes an int32 page size, pageSize should
// be silently truncated.
fetch func(pageSize int, pageToken string) (nextPageToken string, err error)
// Function that returns the number of currently buffered items.
bufLen func() int
// Function that returns the buffer, after setting the buffer variable to nil.
takeBuf func() interface{}
// Set to true on first call to PageInfo.next or Pager.NextPage. Used to check
// for calls to both Next and NextPage with the same iterator.
nextCalled, nextPageCalled bool
}
// NewPageInfo exposes internals for iterator implementations.
// It is not a stable interface.
var NewPageInfo = newPageInfo
// If an iterator can support paging, its iterator-creating method should call
// this (via the NewPageInfo variable above).
//
// The fetch, bufLen and takeBuf arguments provide access to the
// iterator's internal slice of buffered items. They behave as described in
// PageInfo, above.
//
// The return value is the PageInfo.next method bound to the returned PageInfo value.
// (Returning it avoids exporting PageInfo.next.)
func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (*PageInfo, func() error) {
pi := &PageInfo{
fetch: fetch,
bufLen: bufLen,
takeBuf: takeBuf,
}
return pi, pi.next
}
// Remaining returns the number of items available before the iterator makes another API call.
func (pi *PageInfo) Remaining() int { return pi.bufLen() }
// next provides support for an iterator's Next function. An iterator's Next
// should return the error returned by next if non-nil; else it can assume
// there is at least one item in its buffer, and it should return that item and
// remove it from the buffer.
func (pi *PageInfo) next() error {
pi.nextCalled = true
if pi.err != nil { // Once we get an error, always return it.
// TODO(jba): fix so users can retry on transient errors? Probably not worth it.
return pi.err
}
if pi.nextPageCalled {
pi.err = errMixed
return pi.err
}
// Loop until we get some items or reach the end.
for pi.bufLen() == 0 && !pi.atEnd {
if err := pi.fill(pi.MaxSize); err != nil {
pi.err = err
return pi.err
}
if pi.Token == "" {
pi.atEnd = true
}
}
// Either the buffer is non-empty or pi.atEnd is true (or both).
if pi.bufLen() == 0 {
// The buffer is empty and pi.atEnd is true, i.e. the service has no
// more items.
pi.err = Done
}
return pi.err
}
// Call the service to fill the buffer, using size and pi.Token. Set pi.Token to the
// next-page token returned by the call.
// If fill returns a non-nil error, the buffer will be empty.
func (pi *PageInfo) fill(size int) error {
tok, err := pi.fetch(size, pi.Token)
if err != nil {
pi.takeBuf() // clear the buffer
return err
}
pi.Token = tok
return nil
}
// Pageable is implemented by iterators that support paging.
type Pageable interface {
// PageInfo returns paging information associated with the iterator.
PageInfo() *PageInfo
}
// Pager supports retrieving iterator items a page at a time.
type Pager struct {
pageInfo *PageInfo
pageSize int
}
// NewPager returns a pager that uses iter. Calls to its NextPage method will
// obtain exactly pageSize items, unless fewer remain. The pageToken argument
// indicates where to start the iteration. Pass the empty string to start at
// the beginning, or pass a token retrieved from a call to Pager.NextPage.
//
// If you use an iterator with a Pager, you must not call Next on the iterator.
func NewPager(iter Pageable, pageSize int, pageToken string) *Pager {
p := &Pager{
pageInfo: iter.PageInfo(),
pageSize: pageSize,
}
p.pageInfo.Token = pageToken
if pageSize <= 0 {
p.pageInfo.err = errors.New("iterator: page size must be positive")
}
return p
}
// NextPage retrieves a sequence of items from the iterator and appends them
// to slicep, which must be a pointer to a slice of the iterator's item type.
// Exactly p.pageSize items will be appended, unless fewer remain.
//
// The first return value is the page token to use for the next page of items.
// If empty, there are no more pages. Aside from checking for the end of the
// iteration, the returned page token is only needed if the iteration is to be
// resumed at a later time, in another context (possibly another process).
//
// The second return value is non-nil if an error occurred. It will never be
// the special iterator sentinel value Done. To recognize the end of the
// iteration, compare nextPageToken to the empty string.
//
// It is possible for NextPage to return a single zero-length page along with
// an empty page token when there are no more items in the iteration.
func (p *Pager) NextPage(slicep interface{}) (nextPageToken string, err error) {
p.pageInfo.nextPageCalled = true
if p.pageInfo.err != nil {
return "", p.pageInfo.err
}
if p.pageInfo.nextCalled {
p.pageInfo.err = errMixed
return "", p.pageInfo.err
}
if p.pageInfo.bufLen() > 0 {
return "", errors.New("must call NextPage with an empty buffer")
}
// The buffer must be empty here, so takeBuf is a no-op. We call it just to get
// the buffer's type.
wantSliceType := reflect.PtrTo(reflect.ValueOf(p.pageInfo.takeBuf()).Type())
if slicep == nil {
return "", errors.New("nil passed to Pager.NextPage")
}
vslicep := reflect.ValueOf(slicep)
if vslicep.Type() != wantSliceType {
return "", fmt.Errorf("slicep should be of type %s, got %T", wantSliceType, slicep)
}
for p.pageInfo.bufLen() < p.pageSize {
if err := p.pageInfo.fill(p.pageSize - p.pageInfo.bufLen()); err != nil {
p.pageInfo.err = err
return "", p.pageInfo.err
}
if p.pageInfo.Token == "" {
break
}
}
e := vslicep.Elem()
e.Set(reflect.AppendSlice(e, reflect.ValueOf(p.pageInfo.takeBuf())))
return p.pageInfo.Token, nil
}
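
To make the PageInfo and Pager contract above concrete, here is a minimal sketch of an iterator over a fake in-memory paged source, wired through NewPageInfo as the package comment describes. stringIterator, newStringIterator, and the pages/order maps are invented for illustration; a fresh iterator is used for the Pager because mixing Next and NextPage on one iterator returns errMixed.

package main

import (
    "fmt"

    "google.golang.org/api/iterator"
)

// stringIterator iterates over strings served by a fake paged source.
type stringIterator struct {
    pageInfo *iterator.PageInfo
    nextFunc func() error
    items    []string
}

// newStringIterator builds an iterator over pages keyed by page token;
// order maps each token to the token of the following page ("" = done).
func newStringIterator(pages map[string][]string, order map[string]string) *stringIterator {
    it := &stringIterator{}
    it.pageInfo, it.nextFunc = iterator.NewPageInfo(
        func(pageSize int, pageToken string) (string, error) {
            it.items = append(it.items, pages[pageToken]...) // fill the buffer
            return order[pageToken], nil                     // next-page token
        },
        func() int { return len(it.items) },
        func() interface{} { b := it.items; it.items = nil; return b },
    )
    return it
}

func (it *stringIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *stringIterator) Next() (string, error) {
    if err := it.nextFunc(); err != nil {
        return "", err
    }
    item := it.items[0]
    it.items = it.items[1:]
    return item, nil
}

func main() {
    pages := map[string][]string{"": {"a", "b"}, "t1": {"c"}}
    order := map[string]string{"": "t1", "t1": ""}

    // Element-at-a-time iteration.
    it := newStringIterator(pages, order)
    for {
        s, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            fmt.Println("error:", err)
            return
        }
        fmt.Println(s) // a, b, c
    }

    // Page-at-a-time iteration; use a fresh iterator to avoid errMixed.
    var page []string
    p := iterator.NewPager(newStringIterator(pages, order), 2, "")
    tok, err := p.NextPage(&page)
    fmt.Println(page, tok, err) // [a b] t1 <nil>
}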

156
vendor/google.golang.org/api/option/option.go generated vendored Normal file
View File

@ -0,0 +1,156 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package option contains options for Google API clients.
package option
import (
"net/http"
"golang.org/x/oauth2"
"google.golang.org/api/internal"
"google.golang.org/grpc"
)
// A ClientOption is an option for a Google API client.
type ClientOption interface {
Apply(*internal.DialSettings)
}
// WithTokenSource returns a ClientOption that specifies an OAuth2 token
// source to be used as the basis for authentication.
func WithTokenSource(s oauth2.TokenSource) ClientOption {
return withTokenSource{s}
}
type withTokenSource struct{ ts oauth2.TokenSource }
func (w withTokenSource) Apply(o *internal.DialSettings) {
o.TokenSource = w.ts
}
// WithServiceAccountFile returns a ClientOption that uses a Google service
// account credentials file to authenticate.
// Use WithTokenSource with a token source created from
// golang.org/x/oauth2/google.JWTConfigFromJSON
// if reading the file from disk is not an option.
func WithServiceAccountFile(filename string) ClientOption {
return withServiceAccountFile(filename)
}
type withServiceAccountFile string
func (w withServiceAccountFile) Apply(o *internal.DialSettings) {
o.ServiceAccountJSONFilename = string(w)
}
// WithEndpoint returns a ClientOption that overrides the default endpoint
// to be used for a service.
func WithEndpoint(url string) ClientOption {
return withEndpoint(url)
}
type withEndpoint string
func (w withEndpoint) Apply(o *internal.DialSettings) {
o.Endpoint = string(w)
}
// WithScopes returns a ClientOption that overrides the default OAuth2 scopes
// to be used for a service.
func WithScopes(scope ...string) ClientOption {
return withScopes(scope)
}
type withScopes []string
func (w withScopes) Apply(o *internal.DialSettings) {
s := make([]string, len(w))
copy(s, w)
o.Scopes = s
}
// WithUserAgent returns a ClientOption that sets the User-Agent.
func WithUserAgent(ua string) ClientOption {
return withUA(ua)
}
type withUA string
func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) }
// WithHTTPClient returns a ClientOption that specifies the HTTP client to use
// as the basis of communications. This option may only be used with services
// that support HTTP as their communication transport. When used, the
// WithHTTPClient option takes precedence over all other supplied options.
func WithHTTPClient(client *http.Client) ClientOption {
return withHTTPClient{client}
}
type withHTTPClient struct{ client *http.Client }
func (w withHTTPClient) Apply(o *internal.DialSettings) {
o.HTTPClient = w.client
}
// WithGRPCConn returns a ClientOption that specifies the gRPC client
// connection to use as the basis of communications. This option may only be
// used with services that support gRPC as their communication transport. When
// used, the WithGRPCConn option takes precedence over all other supplied
// options.
func WithGRPCConn(conn *grpc.ClientConn) ClientOption {
return withGRPCConn{conn}
}
type withGRPCConn struct{ conn *grpc.ClientConn }
func (w withGRPCConn) Apply(o *internal.DialSettings) {
o.GRPCConn = w.conn
}
// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption
// to an underlying gRPC dial. It does not work with WithGRPCConn.
func WithGRPCDialOption(opt grpc.DialOption) ClientOption {
return withGRPCDialOption{opt}
}
type withGRPCDialOption struct{ opt grpc.DialOption }
func (w withGRPCDialOption) Apply(o *internal.DialSettings) {
o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt)
}
// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC
// connections that requests will be balanced between.
// This is an EXPERIMENTAL API and may be changed or removed in the future.
func WithGRPCConnectionPool(size int) ClientOption {
return withGRPCConnectionPool(size)
}
type withGRPCConnectionPool int
func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) {
balancer := grpc.RoundRobin(internal.NewPoolResolver(int(w), o))
o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer))
}
// WithAPIKey returns a ClientOption that specifies an API key to be used
// as the basis for authentication.
func WithAPIKey(apiKey string) ClientOption {
return withAPIKey(apiKey)
}
type withAPIKey string
func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) }
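
As a small sketch of how these options are typically combined, the snippet below assembles a slice of ClientOptions that a client constructor or the transport package (see dial.go below) could consume; the endpoint, scope, and user-agent strings are placeholders.

package main

import (
    "fmt"

    "google.golang.org/api/option"
    "google.golang.org/grpc"
)

func main() {
    opts := []option.ClientOption{
        option.WithEndpoint("example.googleapis.com:443"),                    // placeholder endpoint
        option.WithScopes("https://www.googleapis.com/auth/cloud-platform"),  // placeholder scope
        option.WithUserAgent("my-app/1.0"),                                   // placeholder user agent
        option.WithGRPCDialOption(grpc.WithBlock()),                          // extra gRPC dial option
        option.WithGRPCConnectionPool(4),                                     // experimental, see above
    }
    fmt.Println("configured", len(opts), "options")
}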

201
vendor/google.golang.org/api/transport/dial.go generated vendored Normal file
View File

@ -0,0 +1,201 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package transport supports network connections to HTTP and GRPC servers.
// This package is not intended for use by end developers. Use the
// google.golang.org/api/option package to configure API clients.
package transport
import (
"errors"
"fmt"
"io/ioutil"
"net/http"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/oauth"
gtransport "google.golang.org/api/googleapi/transport"
"google.golang.org/api/internal"
"google.golang.org/api/option"
)
// NewHTTPClient returns an HTTP client for use communicating with a Google cloud
// service, configured with the given ClientOptions. It also returns the endpoint
// for the service as specified in the options.
func NewHTTPClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) {
var o internal.DialSettings
for _, opt := range opts {
opt.Apply(&o)
}
if o.GRPCConn != nil {
return nil, "", errors.New("unsupported gRPC connection specified")
}
// TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided?
if o.HTTPClient != nil {
return o.HTTPClient, o.Endpoint, nil
}
if o.APIKey != "" {
hc := &http.Client{
Transport: &gtransport.APIKey{
Key: o.APIKey,
Transport: userAgentTransport{
base: baseTransport(ctx),
userAgent: o.UserAgent,
},
},
}
return hc, o.Endpoint, nil
}
if o.ServiceAccountJSONFilename != "" {
ts, err := serviceAcctTokenSource(ctx, o.ServiceAccountJSONFilename, o.Scopes...)
if err != nil {
return nil, "", err
}
o.TokenSource = ts
}
if o.TokenSource == nil {
var err error
o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...)
if err != nil {
return nil, "", fmt.Errorf("google.DefaultTokenSource: %v", err)
}
}
hc := &http.Client{
Transport: &oauth2.Transport{
Source: o.TokenSource,
Base: userAgentTransport{
base: baseTransport(ctx),
userAgent: o.UserAgent,
},
},
}
return hc, o.Endpoint, nil
}
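
A brief sketch of calling NewHTTPClient with an API-key option (all values are placeholders); with an API key set, the client is built from the APIKey and user-agent transports assembled above and no default credentials are needed.

package main

import (
    "fmt"

    "golang.org/x/net/context"
    "google.golang.org/api/option"
    "google.golang.org/api/transport"
)

func main() {
    ctx := context.Background()
    hc, endpoint, err := transport.NewHTTPClient(ctx,
        option.WithAPIKey("YOUR_API_KEY"),                      // placeholder
        option.WithEndpoint("https://example.googleapis.com/"), // placeholder
        option.WithUserAgent("my-app/1.0"),                     // placeholder
    )
    if err != nil {
        fmt.Println("NewHTTPClient:", err)
        return
    }
    _ = hc // use hc to call the service rooted at endpoint
    fmt.Println("endpoint:", endpoint)
}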
type userAgentTransport struct {
userAgent string
base http.RoundTripper
}
func (t userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) {
rt := t.base
if rt == nil {
return nil, errors.New("transport: no Transport specified")
}
if t.userAgent == "" {
return rt.RoundTrip(req)
}
newReq := *req
newReq.Header = make(http.Header)
for k, vv := range req.Header {
newReq.Header[k] = vv
}
// TODO(cbro): append to existing User-Agent header?
newReq.Header["User-Agent"] = []string{t.userAgent}
return rt.RoundTrip(&newReq)
}
// Set at init time by dial_appengine.go. If nil, we're not on App Engine.
var appengineDialerHook func(context.Context) grpc.DialOption
var appengineUrlfetchHook func(context.Context) http.RoundTripper
// baseTransport returns the base HTTP transport.
// On App Engine, this is urlfetch.Transport, otherwise it's http.DefaultTransport.
func baseTransport(ctx context.Context) http.RoundTripper {
if appengineUrlfetchHook != nil {
return appengineUrlfetchHook(ctx)
}
return http.DefaultTransport
}
// DialGRPC returns a GRPC connection for use communicating with a Google cloud
// service, configured with the given ClientOptions.
func DialGRPC(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) {
var o internal.DialSettings
for _, opt := range opts {
opt.Apply(&o)
}
if o.HTTPClient != nil {
return nil, errors.New("unsupported HTTP client specified")
}
if o.GRPCConn != nil {
return o.GRPCConn, nil
}
if o.ServiceAccountJSONFilename != "" {
ts, err := serviceAcctTokenSource(ctx, o.ServiceAccountJSONFilename, o.Scopes...)
if err != nil {
return nil, err
}
o.TokenSource = ts
}
if o.TokenSource == nil {
var err error
o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...)
if err != nil {
return nil, fmt.Errorf("google.DefaultTokenSource: %v", err)
}
}
grpcOpts := []grpc.DialOption{
grpc.WithPerRPCCredentials(oauth.TokenSource{o.TokenSource}),
grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")),
}
if appengineDialerHook != nil {
// Use the Socket API on App Engine.
grpcOpts = append(grpcOpts, appengineDialerHook(ctx))
}
grpcOpts = append(grpcOpts, o.GRPCDialOpts...)
if o.UserAgent != "" {
grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent))
}
return grpc.DialContext(ctx, o.Endpoint, grpcOpts...)
}
func serviceAcctTokenSource(ctx context.Context, filename string, scope ...string) (oauth2.TokenSource, error) {
data, err := ioutil.ReadFile(filename)
if err != nil {
return nil, fmt.Errorf("cannot read service account file: %v", err)
}
cfg, err := google.JWTConfigFromJSON(data, scope...)
if err != nil {
return nil, fmt.Errorf("google.JWTConfigFromJSON: %v", err)
}
return cfg.TokenSource(ctx), nil
}
// DialGRPCInsecure returns an insecure GRPC connection for use communicating
// with fake or mock Google cloud service implementations, such as emulators.
// The connection is configured with the given ClientOptions.
func DialGRPCInsecure(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) {
var o internal.DialSettings
for _, opt := range opts {
opt.Apply(&o)
}
if o.HTTPClient != nil {
return nil, errors.New("unsupported HTTP client specified")
}
if o.GRPCConn != nil {
return o.GRPCConn, nil
}
grpcOpts := []grpc.DialOption{grpc.WithInsecure()}
grpcOpts = append(grpcOpts, o.GRPCDialOpts...)
if o.UserAgent != "" {
grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent))
}
return grpc.DialContext(ctx, o.Endpoint, grpcOpts...)
}
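
A minimal sketch of dialing a locally running emulator with DialGRPCInsecure; the address is a placeholder and no RPC stub is created here.

package main

import (
    "fmt"

    "golang.org/x/net/context"
    "google.golang.org/api/option"
    "google.golang.org/api/transport"
)

func main() {
    ctx := context.Background()
    conn, err := transport.DialGRPCInsecure(ctx,
        option.WithEndpoint("localhost:8086"), // placeholder emulator address
    )
    if err != nil {
        fmt.Println("dial failed:", err)
        return
    }
    defer conn.Close()
    fmt.Println("dialed insecure gRPC connection") // wrap conn in a generated client stub to issue RPCs
}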

202
vendor/google.golang.org/genproto/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,64 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/annotations.proto
/*
Package annotations is a generated protocol buffer package.
It is generated from these files:
google/api/annotations.proto
google/api/http.proto
It has these top-level messages:
Http
HttpRule
CustomHttpPattern
*/
package annotations
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
var E_Http = &proto.ExtensionDesc{
ExtendedType: (*google_protobuf.MethodOptions)(nil),
ExtensionType: (*HttpRule)(nil),
Field: 72295728,
Name: "google.api.http",
Tag: "bytes,72295728,opt,name=http",
Filename: "google/api/annotations.proto",
}
func init() {
proto.RegisterExtension(E_Http)
}
func init() { proto.RegisterFile("google/api/annotations.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 208 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc,
0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64,
0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79,
0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15,
0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53,
0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51,
0x83, 0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a,
0x10, 0xd8, 0x10, 0xa7, 0x3c, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08,
0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x8c, 0x72, 0x84, 0xca, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5,
0xeb, 0xe5, 0x17, 0xa5, 0xeb, 0xa7, 0xa7, 0xe6, 0x81, 0xed, 0xd5, 0x87, 0x48, 0x25, 0x16, 0x64,
0x16, 0xa3, 0x7b, 0xda, 0x1a, 0x89, 0xbd, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d,
0xac, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x29, 0x19, 0x62, 0x28, 0x01, 0x00, 0x00,
}

View File

@ -0,0 +1,566 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/http.proto
package annotations
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Defines the HTTP configuration for a service. It contains a list of
// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
// to one or more HTTP REST API methods.
type Http struct {
// A list of HTTP configuration rules that apply to individual API methods.
//
// **NOTE:** All service configuration rules follow "last one wins" order.
Rules []*HttpRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"`
}
func (m *Http) Reset() { *m = Http{} }
func (m *Http) String() string { return proto.CompactTextString(m) }
func (*Http) ProtoMessage() {}
func (*Http) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
func (m *Http) GetRules() []*HttpRule {
if m != nil {
return m.Rules
}
return nil
}
// `HttpRule` defines the mapping of an RPC method to one or more HTTP
// REST APIs. The mapping determines what portions of the request
// message are populated from the path, query parameters, or body of
// the HTTP request. The mapping is typically specified as an
// `google.api.http` annotation, see "google/api/annotations.proto"
// for details.
//
// The mapping consists of a field specifying the path template and
// method kind. The path template can refer to fields in the request
// message, as in the example below which describes a REST GET
// operation on a resource collection of messages:
//
//
// service Messaging {
// rpc GetMessage(GetMessageRequest) returns (Message) {
// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}";
// }
// }
// message GetMessageRequest {
// message SubMessage {
// string subfield = 1;
// }
// string message_id = 1; // mapped to the URL
// SubMessage sub = 2; // `sub.subfield` is url-mapped
// }
// message Message {
// string text = 1; // content of the resource
// }
//
// The same http annotation can alternatively be expressed inside the
// `GRPC API Configuration` YAML file.
//
// http:
// rules:
// - selector: <proto_package_name>.Messaging.GetMessage
// get: /v1/messages/{message_id}/{sub.subfield}
//
// This definition enables an automatic, bidirectional mapping of HTTP
// JSON to RPC. Example:
//
// HTTP | RPC
// -----|-----
// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))`
//
// In general, not only fields but also field paths can be referenced
// from a path pattern. Fields mapped to the path pattern cannot be
// repeated and must have a primitive (non-message) type.
//
// Any fields in the request message which are not bound by the path
// pattern automatically become (optional) HTTP query
// parameters. Assume the following definition of the request message:
//
//
// message GetMessageRequest {
// message SubMessage {
// string subfield = 1;
// }
// string message_id = 1; // mapped to the URL
// int64 revision = 2; // becomes a parameter
// SubMessage sub = 3; // `sub.subfield` becomes a parameter
// }
//
//
// This enables an HTTP JSON to RPC mapping as below:
//
// HTTP | RPC
// -----|-----
// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))`
//
// Note that fields which are mapped to HTTP parameters must have a
// primitive type or a repeated primitive type. Message types are not
// allowed. In the case of a repeated type, the parameter can be
// repeated in the URL, as in `...?param=A&param=B`.
//
// For HTTP method kinds which allow a request body, the `body` field
// specifies the mapping. Consider a REST update method on the
// message resource collection:
//
//
// service Messaging {
// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
// option (google.api.http) = {
// put: "/v1/messages/{message_id}"
// body: "message"
// };
// }
// }
// message UpdateMessageRequest {
// string message_id = 1; // mapped to the URL
// Message message = 2; // mapped to the body
// }
//
//
// The following HTTP JSON to RPC mapping is enabled, where the
// representation of the JSON in the request body is determined by
// protos JSON encoding:
//
// HTTP | RPC
// -----|-----
// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
//
// The special name `*` can be used in the body mapping to define that
// every field not bound by the path template should be mapped to the
// request body. This enables the following alternative definition of
// the update method:
//
// service Messaging {
// rpc UpdateMessage(Message) returns (Message) {
// option (google.api.http) = {
// put: "/v1/messages/{message_id}"
// body: "*"
// };
// }
// }
// message Message {
// string message_id = 1;
// string text = 2;
// }
//
//
// The following HTTP JSON to RPC mapping is enabled:
//
// HTTP | RPC
// -----|-----
// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")`
//
// Note that when using `*` in the body mapping, it is not possible to
// have HTTP parameters, as all fields not bound by the path end in
// the body. This makes this option less commonly used in practice when
// defining REST APIs. The common usage of `*` is in custom methods
// which don't use the URL at all for transferring data.
//
// It is possible to define multiple HTTP methods for one RPC by using
// the `additional_bindings` option. Example:
//
// service Messaging {
// rpc GetMessage(GetMessageRequest) returns (Message) {
// option (google.api.http) = {
// get: "/v1/messages/{message_id}"
// additional_bindings {
// get: "/v1/users/{user_id}/messages/{message_id}"
// }
// };
// }
// }
// message GetMessageRequest {
// string message_id = 1;
// string user_id = 2;
// }
//
//
// This enables the following two alternative HTTP JSON to RPC
// mappings:
//
// HTTP | RPC
// -----|-----
// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")`
//
// # Rules for HTTP mapping
//
// The rules for mapping HTTP path, query parameters, and body fields
// to the request message are as follows:
//
// 1. The `body` field specifies either `*` or a field path, or is
// omitted. If omitted, it assumes there is no HTTP body.
// 2. Leaf fields (recursive expansion of nested messages in the
// request) can be classified into three types:
// (a) Matched in the URL template.
// (b) Covered by body (if body is `*`, everything except (a) fields;
// else everything under the body field)
// (c) All other fields.
// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
// 4. Any body sent with an HTTP request can contain only (b) fields.
//
// The syntax of the path template is as follows:
//
// Template = "/" Segments [ Verb ] ;
// Segments = Segment { "/" Segment } ;
// Segment = "*" | "**" | LITERAL | Variable ;
// Variable = "{" FieldPath [ "=" Segments ] "}" ;
// FieldPath = IDENT { "." IDENT } ;
// Verb = ":" LITERAL ;
//
// The syntax `*` matches a single path segment. It follows the semantics of
// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
// Expansion.
//
// The syntax `**` matches zero or more path segments. It follows the semantics
// of [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.3 Reserved
// Expansion. NOTE: it must be the last segment in the path except the Verb.
//
// The syntax `LITERAL` matches literal text in the URL path.
//
// The syntax `Variable` matches the entire path as specified by its template;
// this nested template must not contain further variables. If a variable
// matches a single path segment, its template may be omitted, e.g. `{var}`
// is equivalent to `{var=*}`.
//
// NOTE: the field paths in variables and in the `body` must not refer to
// repeated fields or map fields.
//
// Use CustomHttpPattern to specify any HTTP method that is not included in the
// `pattern` field, such as HEAD, or "*" to leave the HTTP method unspecified for
// a given URL path rule. The wild-card rule is useful for services that provide
// content to Web (HTML) clients.
type HttpRule struct {
// Selects methods to which this rule applies.
//
// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
// Determines the URL pattern that is matched by this rule. This pattern can be
// used with any of the {get|put|post|delete|patch} methods. A custom method
// can be defined using the 'custom' field.
//
// Types that are valid to be assigned to Pattern:
// *HttpRule_Get
// *HttpRule_Put
// *HttpRule_Post
// *HttpRule_Delete
// *HttpRule_Patch
// *HttpRule_Custom
Pattern isHttpRule_Pattern `protobuf_oneof:"pattern"`
// The name of the request field whose value is mapped to the HTTP body, or
// `*` for mapping all fields not captured by the path pattern to the HTTP
// body. NOTE: the referred field must not be a repeated field and must be
// present at the top-level of request message type.
Body string `protobuf:"bytes,7,opt,name=body" json:"body,omitempty"`
// Additional HTTP bindings for the selector. Nested bindings must
// not contain an `additional_bindings` field themselves (that is,
// the nesting may only be one level deep).
AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings" json:"additional_bindings,omitempty"`
}
func (m *HttpRule) Reset() { *m = HttpRule{} }
func (m *HttpRule) String() string { return proto.CompactTextString(m) }
func (*HttpRule) ProtoMessage() {}
func (*HttpRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
type isHttpRule_Pattern interface {
isHttpRule_Pattern()
}
type HttpRule_Get struct {
Get string `protobuf:"bytes,2,opt,name=get,oneof"`
}
type HttpRule_Put struct {
Put string `protobuf:"bytes,3,opt,name=put,oneof"`
}
type HttpRule_Post struct {
Post string `protobuf:"bytes,4,opt,name=post,oneof"`
}
type HttpRule_Delete struct {
Delete string `protobuf:"bytes,5,opt,name=delete,oneof"`
}
type HttpRule_Patch struct {
Patch string `protobuf:"bytes,6,opt,name=patch,oneof"`
}
type HttpRule_Custom struct {
Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,oneof"`
}
func (*HttpRule_Get) isHttpRule_Pattern() {}
func (*HttpRule_Put) isHttpRule_Pattern() {}
func (*HttpRule_Post) isHttpRule_Pattern() {}
func (*HttpRule_Delete) isHttpRule_Pattern() {}
func (*HttpRule_Patch) isHttpRule_Pattern() {}
func (*HttpRule_Custom) isHttpRule_Pattern() {}
func (m *HttpRule) GetPattern() isHttpRule_Pattern {
if m != nil {
return m.Pattern
}
return nil
}
func (m *HttpRule) GetSelector() string {
if m != nil {
return m.Selector
}
return ""
}
func (m *HttpRule) GetGet() string {
if x, ok := m.GetPattern().(*HttpRule_Get); ok {
return x.Get
}
return ""
}
func (m *HttpRule) GetPut() string {
if x, ok := m.GetPattern().(*HttpRule_Put); ok {
return x.Put
}
return ""
}
func (m *HttpRule) GetPost() string {
if x, ok := m.GetPattern().(*HttpRule_Post); ok {
return x.Post
}
return ""
}
func (m *HttpRule) GetDelete() string {
if x, ok := m.GetPattern().(*HttpRule_Delete); ok {
return x.Delete
}
return ""
}
func (m *HttpRule) GetPatch() string {
if x, ok := m.GetPattern().(*HttpRule_Patch); ok {
return x.Patch
}
return ""
}
func (m *HttpRule) GetCustom() *CustomHttpPattern {
if x, ok := m.GetPattern().(*HttpRule_Custom); ok {
return x.Custom
}
return nil
}
func (m *HttpRule) GetBody() string {
if m != nil {
return m.Body
}
return ""
}
func (m *HttpRule) GetAdditionalBindings() []*HttpRule {
if m != nil {
return m.AdditionalBindings
}
return nil
}
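// The following is an illustrative sketch only, not part of the generated
// API surface: it shows how the HTTP mapping documented on HttpRule above
// might be expressed with these Go types. The selector, paths, and method
// name are hypothetical values chosen for the example.
func exampleHttpRule() *HttpRule {
	return &HttpRule{
		// Applies this rule to a single RPC method (selector syntax follows
		// google.api.DocumentationRule.selector).
		Selector: "example.v1.Messaging.GetMessage",
		// The oneof pattern: a GET binding whose {message_id} variable
		// captures exactly one path segment. Body is omitted, so the GET
		// request carries no HTTP body.
		Pattern: &HttpRule_Get{Get: "/v1/messages/{message_id}"},
		// An additional POST binding for the same method; the whole request
		// message (minus fields captured in the path) becomes the HTTP body.
		// Nested bindings must not set additional_bindings themselves.
		AdditionalBindings: []*HttpRule{
			{
				Pattern: &HttpRule_Post{Post: "/v1/messages/{message_id}"},
				Body:    "*",
			},
		},
	}
}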
// XXX_OneofFuncs is for the internal use of the proto package.
func (*HttpRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _HttpRule_OneofMarshaler, _HttpRule_OneofUnmarshaler, _HttpRule_OneofSizer, []interface{}{
(*HttpRule_Get)(nil),
(*HttpRule_Put)(nil),
(*HttpRule_Post)(nil),
(*HttpRule_Delete)(nil),
(*HttpRule_Patch)(nil),
(*HttpRule_Custom)(nil),
}
}
func _HttpRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*HttpRule)
// pattern
switch x := m.Pattern.(type) {
case *HttpRule_Get:
b.EncodeVarint(2<<3 | proto.WireBytes)
b.EncodeStringBytes(x.Get)
case *HttpRule_Put:
b.EncodeVarint(3<<3 | proto.WireBytes)
b.EncodeStringBytes(x.Put)
case *HttpRule_Post:
b.EncodeVarint(4<<3 | proto.WireBytes)
b.EncodeStringBytes(x.Post)
case *HttpRule_Delete:
b.EncodeVarint(5<<3 | proto.WireBytes)
b.EncodeStringBytes(x.Delete)
case *HttpRule_Patch:
b.EncodeVarint(6<<3 | proto.WireBytes)
b.EncodeStringBytes(x.Patch)
case *HttpRule_Custom:
b.EncodeVarint(8<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Custom); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("HttpRule.Pattern has unexpected type %T", x)
}
return nil
}
func _HttpRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*HttpRule)
switch tag {
case 2: // pattern.get
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Pattern = &HttpRule_Get{x}
return true, err
case 3: // pattern.put
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Pattern = &HttpRule_Put{x}
return true, err
case 4: // pattern.post
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Pattern = &HttpRule_Post{x}
return true, err
case 5: // pattern.delete
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Pattern = &HttpRule_Delete{x}
return true, err
case 6: // pattern.patch
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Pattern = &HttpRule_Patch{x}
return true, err
case 8: // pattern.custom
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(CustomHttpPattern)
err := b.DecodeMessage(msg)
m.Pattern = &HttpRule_Custom{msg}
return true, err
default:
return false, nil
}
}
func _HttpRule_OneofSizer(msg proto.Message) (n int) {
m := msg.(*HttpRule)
// pattern
switch x := m.Pattern.(type) {
case *HttpRule_Get:
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.Get)))
n += len(x.Get)
case *HttpRule_Put:
n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.Put)))
n += len(x.Put)
case *HttpRule_Post:
n += proto.SizeVarint(4<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.Post)))
n += len(x.Post)
case *HttpRule_Delete:
n += proto.SizeVarint(5<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.Delete)))
n += len(x.Delete)
case *HttpRule_Patch:
n += proto.SizeVarint(6<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.Patch)))
n += len(x.Patch)
case *HttpRule_Custom:
s := proto.Size(x.Custom)
n += proto.SizeVarint(8<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
// A custom pattern is used for defining a custom HTTP verb.
type CustomHttpPattern struct {
// The name of this custom HTTP verb.
Kind string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"`
// The path matched by this custom verb.
Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"`
}
func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} }
func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) }
func (*CustomHttpPattern) ProtoMessage() {}
func (*CustomHttpPattern) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
func (m *CustomHttpPattern) GetKind() string {
if m != nil {
return m.Kind
}
return ""
}
func (m *CustomHttpPattern) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
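// Illustrative sketch only: a rule that uses CustomHttpPattern for an HTTP
// method not covered by the get/put/post/delete/patch fields. The "FETCH"
// verb and the path are hypothetical values for the example.
func exampleCustomHttpRule() *HttpRule {
	return &HttpRule{
		Pattern: &HttpRule_Custom{
			Custom: &CustomHttpPattern{
				Kind: "FETCH",            // the custom HTTP verb
				Path: "/v1/reports/{id}", // the path matched by that verb
			},
		},
	}
}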
func init() {
proto.RegisterType((*Http)(nil), "google.api.Http")
proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule")
proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern")
}
func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor1) }
var fileDescriptor1 = []byte{
// 359 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0x6a, 0xe3, 0x30,
0x10, 0xc6, 0xd7, 0x89, 0xe3, 0x24, 0x13, 0x58, 0x58, 0x6d, 0x76, 0x11, 0x85, 0x42, 0xc8, 0x29,
0xf4, 0x60, 0x43, 0x7a, 0xe8, 0x21, 0xa7, 0xb8, 0x94, 0xa6, 0xb7, 0xe0, 0x63, 0x2f, 0x45, 0xb1,
0x85, 0xa2, 0xd6, 0x91, 0x84, 0x3d, 0x3e, 0xf4, 0x75, 0xfa, 0x0e, 0x7d, 0xb7, 0x1e, 0x8b, 0xfe,
0xa4, 0x09, 0x14, 0x7a, 0x9b, 0xef, 0x37, 0x9f, 0x34, 0xa3, 0x19, 0xc1, 0x3f, 0xa1, 0xb5, 0xa8,
0x79, 0xc6, 0x8c, 0xcc, 0xf6, 0x88, 0x26, 0x35, 0x8d, 0x46, 0x4d, 0xc0, 0xe3, 0x94, 0x19, 0x39,
0x5f, 0x42, 0xbc, 0x41, 0x34, 0xe4, 0x0a, 0x06, 0x4d, 0x57, 0xf3, 0x96, 0x46, 0xb3, 0xfe, 0x62,
0xb2, 0x9c, 0xa6, 0x27, 0x4f, 0x6a, 0x0d, 0x45, 0x57, 0xf3, 0xc2, 0x5b, 0xe6, 0xef, 0x3d, 0x18,
0x1d, 0x19, 0xb9, 0x80, 0x51, 0xcb, 0x6b, 0x5e, 0xa2, 0x6e, 0x68, 0x34, 0x8b, 0x16, 0xe3, 0xe2,
0x4b, 0x13, 0x02, 0x7d, 0xc1, 0x91, 0xf6, 0x2c, 0xde, 0xfc, 0x2a, 0xac, 0xb0, 0xcc, 0x74, 0x48,
0xfb, 0x47, 0x66, 0x3a, 0x24, 0x53, 0x88, 0x8d, 0x6e, 0x91, 0xc6, 0x01, 0x3a, 0x45, 0x28, 0x24,
0x15, 0xaf, 0x39, 0x72, 0x3a, 0x08, 0x3c, 0x68, 0xf2, 0x1f, 0x06, 0x86, 0x61, 0xb9, 0xa7, 0x49,
0x48, 0x78, 0x49, 0x6e, 0x20, 0x29, 0xbb, 0x16, 0xf5, 0x81, 0x8e, 0x66, 0xd1, 0x62, 0xb2, 0xbc,
0x3c, 0x7f, 0xc5, 0xad, 0xcb, 0xd8, 0xbe, 0xb7, 0x0c, 0x91, 0x37, 0xca, 0x5e, 0xe8, 0xed, 0x84,
0x40, 0xbc, 0xd3, 0xd5, 0x2b, 0x1d, 0xba, 0x07, 0xb8, 0x98, 0xdc, 0xc1, 0x5f, 0x56, 0x55, 0x12,
0xa5, 0x56, 0xac, 0x7e, 0xda, 0x49, 0x55, 0x49, 0x25, 0x5a, 0x3a, 0xf9, 0x61, 0x3e, 0xe4, 0x74,
0x20, 0x0f, 0xfe, 0x7c, 0x0c, 0x43, 0xe3, 0xeb, 0xcd, 0x57, 0xf0, 0xe7, 0x5b, 0x13, 0xb6, 0xf4,
0x8b, 0x54, 0x55, 0x98, 0x9d, 0x8b, 0x2d, 0x33, 0x0c, 0xf7, 0x7e, 0x70, 0x85, 0x8b, 0xf3, 0x67,
0xf8, 0x5d, 0xea, 0xc3, 0x59, 0xd9, 0x7c, 0xec, 0xae, 0xb1, 0x1b, 0xdd, 0x46, 0x8f, 0xeb, 0x90,
0x10, 0xba, 0x66, 0x4a, 0xa4, 0xba, 0x11, 0x99, 0xe0, 0xca, 0xed, 0x3b, 0xf3, 0x29, 0x66, 0x64,
0xeb, 0x7e, 0x02, 0x53, 0x4a, 0x23, 0xb3, 0x6d, 0xb6, 0xab, 0xb3, 0xf8, 0x23, 0x8a, 0xde, 0x7a,
0xf1, 0xfd, 0x7a, 0xfb, 0xb0, 0x4b, 0xdc, 0xb9, 0xeb, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x68,
0x15, 0x60, 0x5b, 0x40, 0x02, 0x00, 0x00,
}


@ -0,0 +1,80 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/experimental/authorization_config.proto
/*
Package api is a generated protocol buffer package.
It is generated from these files:
google/api/experimental/authorization_config.proto
google/api/experimental/experimental.proto
It has these top-level messages:
AuthorizationConfig
Experimental
*/
package api
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Configuration of authorization.
//
// This section determines the authorization provider. If unspecified, no
// authorization check will be done.
//
// Example:
//
// experimental:
// authorization:
// provider: firebaserules.googleapis.com
type AuthorizationConfig struct {
// The name of the authorization provider, such as
// firebaserules.googleapis.com.
Provider string `protobuf:"bytes,1,opt,name=provider" json:"provider,omitempty"`
}
func (m *AuthorizationConfig) Reset() { *m = AuthorizationConfig{} }
func (m *AuthorizationConfig) String() string { return proto.CompactTextString(m) }
func (*AuthorizationConfig) ProtoMessage() {}
func (*AuthorizationConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *AuthorizationConfig) GetProvider() string {
if m != nil {
return m.Provider
}
return ""
}
func init() {
proto.RegisterType((*AuthorizationConfig)(nil), "google.api.AuthorizationConfig")
}
func init() { proto.RegisterFile("google/api/experimental/authorization_config.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 180 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4a, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xad, 0x28, 0x48, 0x2d, 0xca, 0xcc, 0x4d, 0xcd,
0x2b, 0x49, 0xcc, 0xd1, 0x4f, 0x2c, 0x2d, 0xc9, 0xc8, 0x2f, 0xca, 0xac, 0x4a, 0x2c, 0xc9, 0xcc,
0xcf, 0x8b, 0x4f, 0xce, 0xcf, 0x4b, 0xcb, 0x4c, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2,
0x82, 0xe8, 0xd1, 0x4b, 0x2c, 0xc8, 0x54, 0x32, 0xe4, 0x12, 0x76, 0x44, 0x56, 0xe9, 0x0c, 0x56,
0x28, 0x24, 0xc5, 0xc5, 0x51, 0x50, 0x94, 0x5f, 0x96, 0x99, 0x92, 0x5a, 0x24, 0xc1, 0xa8, 0xc0,
0xa8, 0xc1, 0x19, 0x04, 0xe7, 0x3b, 0x25, 0x71, 0xf1, 0x25, 0xe7, 0xe7, 0xea, 0x21, 0x0c, 0x71,
0x92, 0xc0, 0x62, 0x44, 0x00, 0xc8, 0xaa, 0x00, 0xc6, 0x28, 0x5d, 0xa8, 0xba, 0xf4, 0xfc, 0x9c,
0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, 0xfd, 0xf4, 0xd4, 0x3c, 0xb0, 0x43, 0xf4, 0x21, 0x52,
0x89, 0x05, 0x99, 0xc5, 0x20, 0xf7, 0x5b, 0x27, 0x16, 0x64, 0x2e, 0x62, 0x62, 0x71, 0x77, 0x0c,
0xf0, 0x4c, 0x62, 0x03, 0x2b, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x52, 0x27, 0x0c, 0xba,
0xdf, 0x00, 0x00, 0x00,
}


@ -0,0 +1,56 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/experimental/experimental.proto
package api
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Experimental service configuration. These configuration options can
// only be used by whitelisted users.
type Experimental struct {
// Authorization configuration.
Authorization *AuthorizationConfig `protobuf:"bytes,8,opt,name=authorization" json:"authorization,omitempty"`
}
func (m *Experimental) Reset() { *m = Experimental{} }
func (m *Experimental) String() string { return proto.CompactTextString(m) }
func (*Experimental) ProtoMessage() {}
func (*Experimental) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
func (m *Experimental) GetAuthorization() *AuthorizationConfig {
if m != nil {
return m.Authorization
}
return nil
}
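// Illustrative sketch only: the Go equivalent of the YAML example in the
// AuthorizationConfig comment above, i.e. an experimental section whose
// authorization provider is firebaserules.googleapis.com.
func exampleExperimental() *Experimental {
	return &Experimental{
		Authorization: &AuthorizationConfig{
			Provider: "firebaserules.googleapis.com",
		},
	}
}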
func init() {
proto.RegisterType((*Experimental)(nil), "google.api.Experimental")
}
func init() { proto.RegisterFile("google/api/experimental/experimental.proto", fileDescriptor1) }
var fileDescriptor1 = []byte{
// 204 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4a, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xad, 0x28, 0x48, 0x2d, 0xca, 0xcc, 0x4d, 0xcd,
0x2b, 0x49, 0xcc, 0x41, 0xe1, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x71, 0x41, 0xd4, 0xea,
0x25, 0x16, 0x64, 0x4a, 0xc9, 0x20, 0xe9, 0x4b, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc,
0xcf, 0x2b, 0x86, 0xa8, 0x94, 0x32, 0xc2, 0x65, 0x6a, 0x62, 0x69, 0x49, 0x46, 0x7e, 0x51, 0x66,
0x15, 0x58, 0x75, 0x7c, 0x72, 0x7e, 0x5e, 0x5a, 0x66, 0x3a, 0x44, 0x8f, 0x52, 0x28, 0x17, 0x8f,
0x2b, 0x92, 0x52, 0x21, 0x57, 0x2e, 0x5e, 0x14, 0xd5, 0x12, 0x1c, 0x0a, 0x8c, 0x1a, 0xdc, 0x46,
0xf2, 0x7a, 0x08, 0x57, 0xe8, 0x39, 0x22, 0x2b, 0x70, 0x06, 0x9b, 0x16, 0x84, 0xaa, 0xcb, 0x29,
0x9a, 0x8b, 0x2f, 0x39, 0x3f, 0x17, 0x49, 0x93, 0x93, 0x20, 0xb2, 0x35, 0x01, 0x20, 0xbb, 0x03,
0x18, 0xa3, 0x74, 0xa1, 0x0a, 0xd2, 0xf3, 0x73, 0x12, 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5,
0xd3, 0x53, 0xf3, 0xc0, 0x2e, 0xd3, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0x83, 0x3c, 0x64, 0x9d,
0x58, 0x90, 0xb9, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d, 0xac, 0xc0, 0x18, 0x10,
0x00, 0x00, 0xff, 0xff, 0xa0, 0x95, 0x20, 0xe5, 0x46, 0x01, 0x00, 0x00,
}


@ -0,0 +1,119 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/label.proto
/*
Package label is a generated protocol buffer package.
It is generated from these files:
google/api/label.proto
It has these top-level messages:
LabelDescriptor
*/
package label
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Value types that can be used as label values.
type LabelDescriptor_ValueType int32
const (
// A variable-length string. This is the default.
LabelDescriptor_STRING LabelDescriptor_ValueType = 0
// Boolean; true or false.
LabelDescriptor_BOOL LabelDescriptor_ValueType = 1
// A 64-bit signed integer.
LabelDescriptor_INT64 LabelDescriptor_ValueType = 2
)
var LabelDescriptor_ValueType_name = map[int32]string{
0: "STRING",
1: "BOOL",
2: "INT64",
}
var LabelDescriptor_ValueType_value = map[string]int32{
"STRING": 0,
"BOOL": 1,
"INT64": 2,
}
func (x LabelDescriptor_ValueType) String() string {
return proto.EnumName(LabelDescriptor_ValueType_name, int32(x))
}
func (LabelDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
// A description of a label.
type LabelDescriptor struct {
// The label key.
Key string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
// The type of data that can be assigned to the label.
ValueType LabelDescriptor_ValueType `protobuf:"varint,2,opt,name=value_type,json=valueType,enum=google.api.LabelDescriptor_ValueType" json:"value_type,omitempty"`
// A human-readable description for the label.
Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
}
func (m *LabelDescriptor) Reset() { *m = LabelDescriptor{} }
func (m *LabelDescriptor) String() string { return proto.CompactTextString(m) }
func (*LabelDescriptor) ProtoMessage() {}
func (*LabelDescriptor) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *LabelDescriptor) GetKey() string {
if m != nil {
return m.Key
}
return ""
}
func (m *LabelDescriptor) GetValueType() LabelDescriptor_ValueType {
if m != nil {
return m.ValueType
}
return LabelDescriptor_STRING
}
func (m *LabelDescriptor) GetDescription() string {
if m != nil {
return m.Description
}
return ""
}
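// Illustrative sketch only: a descriptor for a hypothetical "response_code"
// label whose values are 64-bit signed integers.
func exampleLabelDescriptor() *LabelDescriptor {
	return &LabelDescriptor{
		Key:         "response_code",
		ValueType:   LabelDescriptor_INT64,
		Description: "HTTP status code returned to the caller.",
	}
}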
func init() {
proto.RegisterType((*LabelDescriptor)(nil), "google.api.LabelDescriptor")
proto.RegisterEnum("google.api.LabelDescriptor_ValueType", LabelDescriptor_ValueType_name, LabelDescriptor_ValueType_value)
}
func init() { proto.RegisterFile("google/api/label.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 252 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4b, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0xcf, 0x49, 0x4c, 0x4a, 0xcd, 0xd1, 0x2b, 0x28, 0xca,
0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x88, 0xeb, 0x25, 0x16, 0x64, 0x2a, 0xed, 0x64, 0xe4, 0xe2, 0xf7,
0x01, 0xc9, 0xb9, 0xa4, 0x16, 0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0x09, 0x09, 0x70, 0x31,
0x67, 0xa7, 0x56, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x81, 0x98, 0x42, 0x2e, 0x5c, 0x5c,
0x65, 0x89, 0x39, 0xa5, 0xa9, 0xf1, 0x25, 0x95, 0x05, 0xa9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0x7c,
0x46, 0xaa, 0x7a, 0x08, 0x63, 0xf4, 0xd0, 0x8c, 0xd0, 0x0b, 0x03, 0xa9, 0x0e, 0xa9, 0x2c, 0x48,
0x0d, 0xe2, 0x2c, 0x83, 0x31, 0x85, 0x14, 0xb8, 0xb8, 0x53, 0xa0, 0x4a, 0x32, 0xf3, 0xf3, 0x24,
0x98, 0xc1, 0xe6, 0x23, 0x0b, 0x29, 0xe9, 0x70, 0x71, 0xc2, 0x75, 0x0a, 0x71, 0x71, 0xb1, 0x05,
0x87, 0x04, 0x79, 0xfa, 0xb9, 0x0b, 0x30, 0x08, 0x71, 0x70, 0xb1, 0x38, 0xf9, 0xfb, 0xfb, 0x08,
0x30, 0x0a, 0x71, 0x72, 0xb1, 0x7a, 0xfa, 0x85, 0x98, 0x99, 0x08, 0x30, 0x39, 0xc5, 0x73, 0xf1,
0x25, 0xe7, 0xe7, 0x22, 0x39, 0xc3, 0x89, 0x0b, 0xec, 0x8e, 0x00, 0x90, 0x2f, 0x03, 0x18, 0xa3,
0x4c, 0xa1, 0x32, 0xe9, 0xf9, 0x39, 0x89, 0x79, 0xe9, 0x7a, 0xf9, 0x45, 0xe9, 0xfa, 0xe9, 0xa9,
0x79, 0xe0, 0x30, 0xd0, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0x23, 0x82, 0xc7, 0x1a, 0x4c, 0xfe,
0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xe2, 0xee, 0x18, 0xe0, 0x99, 0xc4, 0x06, 0x56, 0x6b, 0x0c, 0x08,
0x00, 0x00, 0xff, 0xff, 0x57, 0x04, 0xaa, 0x1f, 0x49, 0x01, 0x00, 0x00,
}


@ -0,0 +1,358 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/metric.proto
/*
Package metric is a generated protocol buffer package.
It is generated from these files:
google/api/metric.proto
It has these top-level messages:
MetricDescriptor
Metric
*/
package metric
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_api "google.golang.org/genproto/googleapis/api/label"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// The kind of measurement. It describes how the data is reported.
type MetricDescriptor_MetricKind int32
const (
// Do not use this default value.
MetricDescriptor_METRIC_KIND_UNSPECIFIED MetricDescriptor_MetricKind = 0
// An instantaneous measurement of a value.
MetricDescriptor_GAUGE MetricDescriptor_MetricKind = 1
// The change in a value during a time interval.
MetricDescriptor_DELTA MetricDescriptor_MetricKind = 2
// A value accumulated over a time interval. Cumulative
// measurements in a time series should have the same start time
// and increasing end times, until an event resets the cumulative
// value to zero and sets a new start time for the following
// points.
MetricDescriptor_CUMULATIVE MetricDescriptor_MetricKind = 3
)
var MetricDescriptor_MetricKind_name = map[int32]string{
0: "METRIC_KIND_UNSPECIFIED",
1: "GAUGE",
2: "DELTA",
3: "CUMULATIVE",
}
var MetricDescriptor_MetricKind_value = map[string]int32{
"METRIC_KIND_UNSPECIFIED": 0,
"GAUGE": 1,
"DELTA": 2,
"CUMULATIVE": 3,
}
func (x MetricDescriptor_MetricKind) String() string {
return proto.EnumName(MetricDescriptor_MetricKind_name, int32(x))
}
func (MetricDescriptor_MetricKind) EnumDescriptor() ([]byte, []int) {
return fileDescriptor0, []int{0, 0}
}
// The value type of a metric.
type MetricDescriptor_ValueType int32
const (
// Do not use this default value.
MetricDescriptor_VALUE_TYPE_UNSPECIFIED MetricDescriptor_ValueType = 0
// The value is a boolean.
// This value type can be used only if the metric kind is `GAUGE`.
MetricDescriptor_BOOL MetricDescriptor_ValueType = 1
// The value is a signed 64-bit integer.
MetricDescriptor_INT64 MetricDescriptor_ValueType = 2
// The value is a double precision floating point number.
MetricDescriptor_DOUBLE MetricDescriptor_ValueType = 3
// The value is a text string.
// This value type can be used only if the metric kind is `GAUGE`.
MetricDescriptor_STRING MetricDescriptor_ValueType = 4
// The value is a [`Distribution`][google.api.Distribution].
MetricDescriptor_DISTRIBUTION MetricDescriptor_ValueType = 5
// The value is money.
MetricDescriptor_MONEY MetricDescriptor_ValueType = 6
)
var MetricDescriptor_ValueType_name = map[int32]string{
0: "VALUE_TYPE_UNSPECIFIED",
1: "BOOL",
2: "INT64",
3: "DOUBLE",
4: "STRING",
5: "DISTRIBUTION",
6: "MONEY",
}
var MetricDescriptor_ValueType_value = map[string]int32{
"VALUE_TYPE_UNSPECIFIED": 0,
"BOOL": 1,
"INT64": 2,
"DOUBLE": 3,
"STRING": 4,
"DISTRIBUTION": 5,
"MONEY": 6,
}
func (x MetricDescriptor_ValueType) String() string {
return proto.EnumName(MetricDescriptor_ValueType_name, int32(x))
}
func (MetricDescriptor_ValueType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor0, []int{0, 1}
}
// Defines a metric type and its schema. Once a metric descriptor is created,
// deleting or altering it stops data collection and makes the metric type's
// existing data unusable.
type MetricDescriptor struct {
// The resource name of the metric descriptor. Depending on the
// implementation, the name typically includes: (1) the parent resource name
// that defines the scope of the metric type or of its data; and (2) the
// metric's URL-encoded type, which also appears in the `type` field of this
// descriptor. For example, the following is the resource name of a custom
// metric within the GCP project `my-project-id`:
//
// "projects/my-project-id/metricDescriptors/custom.googleapis.com%2Finvoice%2Fpaid%2Famount"
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// The metric type, including its DNS name prefix. The type is not
// URL-encoded. All user-defined custom metric types have the DNS name
// `custom.googleapis.com`. Metric types should use a natural hierarchical
// grouping. For example:
//
// "custom.googleapis.com/invoice/paid/amount"
// "appengine.googleapis.com/http/server/response_latencies"
Type string `protobuf:"bytes,8,opt,name=type" json:"type,omitempty"`
// The set of labels that can be used to describe a specific
// instance of this metric type. For example, the
// `appengine.googleapis.com/http/server/response_latencies` metric
// type has a label for the HTTP response code, `response_code`, so
// you can look at latencies for successful responses or just
// for responses that failed.
Labels []*google_api.LabelDescriptor `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty"`
// Whether the metric records instantaneous values, changes to a value, etc.
// Some combinations of `metric_kind` and `value_type` might not be supported.
MetricKind MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"`
// Whether the measurement is an integer, a floating-point number, etc.
// Some combinations of `metric_kind` and `value_type` might not be supported.
ValueType MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"`
// The unit in which the metric value is reported. It is only applicable
// if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The
// supported units are a subset of [The Unified Code for Units of
// Measure](http://unitsofmeasure.org/ucum.html) standard:
//
// **Basic units (UNIT)**
//
// * `bit` bit
// * `By` byte
// * `s` second
// * `min` minute
// * `h` hour
// * `d` day
//
// **Prefixes (PREFIX)**
//
// * `k` kilo (10**3)
// * `M` mega (10**6)
// * `G` giga (10**9)
// * `T` tera (10**12)
// * `P` peta (10**15)
// * `E` exa (10**18)
// * `Z` zetta (10**21)
// * `Y` yotta (10**24)
// * `m` milli (10**-3)
// * `u` micro (10**-6)
// * `n` nano (10**-9)
// * `p` pico (10**-12)
// * `f` femto (10**-15)
// * `a` atto (10**-18)
// * `z` zepto (10**-21)
// * `y` yocto (10**-24)
// * `Ki` kibi (2**10)
// * `Mi` mebi (2**20)
// * `Gi` gibi (2**30)
// * `Ti` tebi (2**40)
//
// **Grammar**
//
// The grammar includes the dimensionless unit `1`, such as `1/s`.
//
// The grammar also includes these connectors:
//
// * `/` division (as an infix operator, e.g. `1/s`).
// * `.` multiplication (as an infix operator, e.g. `GBy.d`)
//
// The grammar for a unit is as follows:
//
// Expression = Component { "." Component } { "/" Component } ;
//
// Component = [ PREFIX ] UNIT [ Annotation ]
// | Annotation
// | "1"
// ;
//
// Annotation = "{" NAME "}" ;
//
// Notes:
//
// * `Annotation` is just a comment if it follows a `UNIT` and is
//    equivalent to `1` if it is used alone. For example,
// `{requests}/s == 1/s`, `By{transmitted}/s == By/s`.
// * `NAME` is a sequence of non-blank printable ASCII characters not
// containing '{' or '}'.
Unit string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"`
// A detailed description of the metric, which can be used in documentation.
Description string `protobuf:"bytes,6,opt,name=description" json:"description,omitempty"`
// A concise name for the metric, which can be displayed in user interfaces.
// Use sentence case without an ending period, for example "Request count".
DisplayName string `protobuf:"bytes,7,opt,name=display_name,json=displayName" json:"display_name,omitempty"`
}
func (m *MetricDescriptor) Reset() { *m = MetricDescriptor{} }
func (m *MetricDescriptor) String() string { return proto.CompactTextString(m) }
func (*MetricDescriptor) ProtoMessage() {}
func (*MetricDescriptor) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *MetricDescriptor) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *MetricDescriptor) GetType() string {
if m != nil {
return m.Type
}
return ""
}
func (m *MetricDescriptor) GetLabels() []*google_api.LabelDescriptor {
if m != nil {
return m.Labels
}
return nil
}
func (m *MetricDescriptor) GetMetricKind() MetricDescriptor_MetricKind {
if m != nil {
return m.MetricKind
}
return MetricDescriptor_METRIC_KIND_UNSPECIFIED
}
func (m *MetricDescriptor) GetValueType() MetricDescriptor_ValueType {
if m != nil {
return m.ValueType
}
return MetricDescriptor_VALUE_TYPE_UNSPECIFIED
}
func (m *MetricDescriptor) GetUnit() string {
if m != nil {
return m.Unit
}
return ""
}
func (m *MetricDescriptor) GetDescription() string {
if m != nil {
return m.Description
}
return ""
}
func (m *MetricDescriptor) GetDisplayName() string {
if m != nil {
return m.DisplayName
}
return ""
}
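// Illustrative sketch only: a descriptor for a hypothetical custom metric,
// tying together the naming, kind/value-type, unit grammar, and label schema
// documented above. The project ID, metric type, and label are made-up values.
func exampleMetricDescriptor() *MetricDescriptor {
	return &MetricDescriptor{
		Name: "projects/my-project-id/metricDescriptors/custom.googleapis.com%2Finvoice%2Fpaid%2Famount",
		Type: "custom.googleapis.com/invoice/paid/amount",
		Labels: []*google_api.LabelDescriptor{
			{Key: "currency", ValueType: google_api.LabelDescriptor_STRING},
		},
		MetricKind:  MetricDescriptor_CUMULATIVE,
		ValueType:   MetricDescriptor_INT64,
		Unit:        "1", // dimensionless count, per the unit grammar above
		Description: "Total amount of paid invoices.",
		DisplayName: "Invoice amount paid",
	}
}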
// A specific metric, identified by specifying values for all of the
// labels of a [`MetricDescriptor`][google.api.MetricDescriptor].
type Metric struct {
// An existing metric type, see [google.api.MetricDescriptor][google.api.MetricDescriptor].
// For example, `custom.googleapis.com/invoice/paid/amount`.
Type string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"`
// The set of label values that uniquely identify this metric. All
// labels listed in the `MetricDescriptor` must be assigned values.
Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *Metric) Reset() { *m = Metric{} }
func (m *Metric) String() string { return proto.CompactTextString(m) }
func (*Metric) ProtoMessage() {}
func (*Metric) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *Metric) GetType() string {
if m != nil {
return m.Type
}
return ""
}
func (m *Metric) GetLabels() map[string]string {
if m != nil {
return m.Labels
}
return nil
}
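// Illustrative sketch only: a concrete metric identifier matching the
// descriptor sketched above. Every label declared in the MetricDescriptor
// must be given a value here; the "USD" value is hypothetical.
func exampleMetric() *Metric {
	return &Metric{
		Type: "custom.googleapis.com/invoice/paid/amount",
		Labels: map[string]string{
			"currency": "USD",
		},
	}
}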
func init() {
proto.RegisterType((*MetricDescriptor)(nil), "google.api.MetricDescriptor")
proto.RegisterType((*Metric)(nil), "google.api.Metric")
proto.RegisterEnum("google.api.MetricDescriptor_MetricKind", MetricDescriptor_MetricKind_name, MetricDescriptor_MetricKind_value)
proto.RegisterEnum("google.api.MetricDescriptor_ValueType", MetricDescriptor_ValueType_name, MetricDescriptor_ValueType_value)
}
func init() { proto.RegisterFile("google/api/metric.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 506 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x53, 0x4d, 0x6f, 0xda, 0x40,
0x10, 0xad, 0x3f, 0x70, 0xc3, 0x10, 0xa1, 0xd5, 0xaa, 0x4a, 0x2c, 0x22, 0x55, 0x94, 0x43, 0xcb,
0x09, 0xa4, 0xa4, 0x4a, 0xbf, 0x4e, 0x80, 0xb7, 0xd4, 0x8a, 0xb1, 0x91, 0x63, 0x23, 0xa5, 0x17,
0xcb, 0x81, 0x95, 0x65, 0xc5, 0xd8, 0xae, 0x71, 0x22, 0xf9, 0x57, 0xf4, 0x17, 0xf4, 0xd2, 0x5f,
0x5a, 0xed, 0xae, 0x03, 0x16, 0x95, 0x72, 0xe2, 0xed, 0x9b, 0x37, 0x6f, 0x67, 0x96, 0x67, 0x38,
0x8f, 0xb2, 0x2c, 0x4a, 0xe8, 0x38, 0xcc, 0xe3, 0xf1, 0x96, 0x96, 0x45, 0xbc, 0x1e, 0xe5, 0x45,
0x56, 0x66, 0x18, 0x44, 0x61, 0x14, 0xe6, 0x71, 0xef, 0xac, 0x21, 0x4a, 0xc2, 0x7b, 0x9a, 0x08,
0xcd, 0xe0, 0x8f, 0x0a, 0x68, 0xc1, 0x9b, 0x0c, 0xba, 0x5b, 0x17, 0x71, 0x5e, 0x66, 0x05, 0xc6,
0xa0, 0xa6, 0xe1, 0x96, 0xea, 0x52, 0x5f, 0x1a, 0xb6, 0x5d, 0x8e, 0x19, 0x57, 0x56, 0x39, 0xd5,
0x4f, 0x04, 0xc7, 0x30, 0xbe, 0x02, 0x8d, 0x7b, 0xed, 0x74, 0xb9, 0xaf, 0x0c, 0x3b, 0x97, 0x17,
0xa3, 0xc3, 0x8d, 0x23, 0x8b, 0x55, 0x0e, 0xa6, 0x6e, 0x2d, 0xc5, 0x3f, 0xa0, 0x23, 0xa6, 0x0c,
0x1e, 0xe2, 0x74, 0xa3, 0x2b, 0x7d, 0x69, 0xd8, 0xbd, 0xfc, 0xd0, 0xec, 0x3c, 0x9e, 0xa7, 0x26,
0x6e, 0xe2, 0x74, 0xe3, 0xc2, 0x76, 0x8f, 0x31, 0x01, 0x78, 0x0a, 0x93, 0x47, 0x1a, 0xf0, 0xc1,
0x54, 0x6e, 0xf4, 0xfe, 0x45, 0xa3, 0x15, 0x93, 0x7b, 0x55, 0x4e, 0xdd, 0xf6, 0xd3, 0x33, 0x64,
0x9b, 0x3d, 0xa6, 0x71, 0xa9, 0xb7, 0xc4, 0x66, 0x0c, 0xe3, 0x3e, 0x74, 0x36, 0x75, 0x5b, 0x9c,
0xa5, 0xba, 0xc6, 0x4b, 0x4d, 0x0a, 0xbf, 0x83, 0xd3, 0x4d, 0xbc, 0xcb, 0x93, 0xb0, 0x0a, 0xf8,
0x5b, 0xbd, 0xae, 0x25, 0x82, 0xb3, 0xc3, 0x2d, 0x1d, 0x38, 0x00, 0x87, 0xc9, 0xf1, 0x05, 0x9c,
0x2f, 0x88, 0xe7, 0x9a, 0xb3, 0xe0, 0xc6, 0xb4, 0x8d, 0xc0, 0xb7, 0x6f, 0x97, 0x64, 0x66, 0x7e,
0x37, 0x89, 0x81, 0x5e, 0xe1, 0x36, 0xb4, 0xe6, 0x13, 0x7f, 0x4e, 0x90, 0xc4, 0xa0, 0x41, 0x2c,
0x6f, 0x82, 0x64, 0xdc, 0x05, 0x98, 0xf9, 0x0b, 0xdf, 0x9a, 0x78, 0xe6, 0x8a, 0x20, 0x65, 0xf0,
0x0b, 0xda, 0xfb, 0x0d, 0x70, 0x0f, 0xce, 0x56, 0x13, 0xcb, 0x27, 0x81, 0x77, 0xb7, 0x24, 0x47,
0x76, 0x27, 0xa0, 0x4e, 0x1d, 0xc7, 0x12, 0x6e, 0xa6, 0xed, 0x5d, 0x7f, 0x44, 0x32, 0x06, 0xd0,
0x0c, 0xc7, 0x9f, 0x5a, 0x04, 0x29, 0x0c, 0xdf, 0x7a, 0xae, 0x69, 0xcf, 0x91, 0x8a, 0x11, 0x9c,
0x1a, 0x26, 0x3b, 0x4d, 0x7d, 0xcf, 0x74, 0x6c, 0xd4, 0x62, 0x4d, 0x0b, 0xc7, 0x26, 0x77, 0x48,
0x1b, 0xfc, 0x96, 0x40, 0x13, 0x4b, 0xec, 0x13, 0xa0, 0x34, 0x12, 0x70, 0x7d, 0x94, 0x80, 0xb7,
0xff, 0x3f, 0xbf, 0x08, 0xc2, 0x8e, 0xa4, 0x65, 0x51, 0x3d, 0x87, 0xa0, 0xf7, 0x05, 0x3a, 0x0d,
0x1a, 0x23, 0x50, 0x1e, 0x68, 0x55, 0xe7, 0x8d, 0x41, 0xfc, 0x06, 0x5a, 0xfc, 0x1f, 0xd2, 0x65,
0xce, 0x89, 0xc3, 0x57, 0xf9, 0xb3, 0x34, 0x0d, 0xa0, 0xbb, 0xce, 0xb6, 0x8d, 0x7b, 0xa6, 0x1d,
0x71, 0xd1, 0x92, 0x05, 0x7a, 0x29, 0xfd, 0xfc, 0x54, 0x97, 0xa2, 0x2c, 0x09, 0xd3, 0x68, 0x94,
0x15, 0xd1, 0x38, 0xa2, 0x29, 0x8f, 0xfb, 0x58, 0x94, 0xc2, 0x3c, 0xde, 0x35, 0x3e, 0x97, 0x6f,
0xe2, 0xe7, 0xaf, 0xac, 0xce, 0x27, 0x4b, 0xf3, 0x5e, 0xe3, 0xd2, 0xab, 0x7f, 0x01, 0x00, 0x00,
0xff, 0xff, 0x18, 0x04, 0x05, 0x82, 0x58, 0x03, 0x00, 0x00,
}


@ -0,0 +1,180 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/monitored_resource.proto
/*
Package monitoredres is a generated protocol buffer package.
It is generated from these files:
google/api/monitored_resource.proto
It has these top-level messages:
MonitoredResourceDescriptor
MonitoredResource
*/
package monitoredres
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_api "google.golang.org/genproto/googleapis/api/label"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// An object that describes the schema of a [MonitoredResource][google.api.MonitoredResource] object using a
// type name and a set of labels. For example, the monitored resource
// descriptor for Google Compute Engine VM instances has a type of
// `"gce_instance"` and specifies the use of the labels `"instance_id"` and
// `"zone"` to identify particular VM instances.
//
// Different APIs can support different monitored resource types. APIs generally
// provide a `list` method that returns the monitored resource descriptors used
// by the API.
type MonitoredResourceDescriptor struct {
// Optional. The resource name of the monitored resource descriptor:
// `"projects/{project_id}/monitoredResourceDescriptors/{type}"` where
// {type} is the value of the `type` field in this object and
// {project_id} is a project ID that provides API-specific context for
// accessing the type. APIs that do not use project information can use the
// resource name format `"monitoredResourceDescriptors/{type}"`.
Name string `protobuf:"bytes,5,opt,name=name" json:"name,omitempty"`
// Required. The monitored resource type. For example, the type
// `"cloudsql_database"` represents databases in Google Cloud SQL.
// The maximum length of this value is 256 characters.
Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
// Optional. A concise name for the monitored resource type that might be
// displayed in user interfaces. It should be a Title Cased Noun Phrase,
// without any article or other determiners. For example,
// `"Google Cloud SQL Database"`.
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"`
// Optional. A detailed description of the monitored resource type that might
// be used in documentation.
Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
// Required. A set of labels used to describe instances of this monitored
// resource type. For example, an individual Google Cloud SQL database is
// identified by values for the labels `"database_id"` and `"zone"`.
Labels []*google_api.LabelDescriptor `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty"`
}
func (m *MonitoredResourceDescriptor) Reset() { *m = MonitoredResourceDescriptor{} }
func (m *MonitoredResourceDescriptor) String() string { return proto.CompactTextString(m) }
func (*MonitoredResourceDescriptor) ProtoMessage() {}
func (*MonitoredResourceDescriptor) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *MonitoredResourceDescriptor) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *MonitoredResourceDescriptor) GetType() string {
if m != nil {
return m.Type
}
return ""
}
func (m *MonitoredResourceDescriptor) GetDisplayName() string {
if m != nil {
return m.DisplayName
}
return ""
}
func (m *MonitoredResourceDescriptor) GetDescription() string {
if m != nil {
return m.Description
}
return ""
}
func (m *MonitoredResourceDescriptor) GetLabels() []*google_api.LabelDescriptor {
if m != nil {
return m.Labels
}
return nil
}
// An object representing a resource that can be used for monitoring, logging,
// billing, or other purposes. Examples include virtual machine instances,
// databases, and storage devices such as disks. The `type` field identifies a
// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object that describes the resource's
// schema. Information in the `labels` field identifies the actual resource and
// its attributes according to the schema. For example, a particular Compute
// Engine VM instance could be represented by the following object, because the
// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for `"gce_instance"` has labels
// `"instance_id"` and `"zone"`:
//
// { "type": "gce_instance",
// "labels": { "instance_id": "12345678901234",
// "zone": "us-central1-a" }}
type MonitoredResource struct {
// Required. The monitored resource type. This field must match
// the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For
// example, the type of a Cloud SQL database is `"cloudsql_database"`.
Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
// Required. Values for all of the labels listed in the associated monitored
// resource descriptor. For example, Cloud SQL databases use the labels
// `"database_id"` and `"zone"`.
Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *MonitoredResource) Reset() { *m = MonitoredResource{} }
func (m *MonitoredResource) String() string { return proto.CompactTextString(m) }
func (*MonitoredResource) ProtoMessage() {}
func (*MonitoredResource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *MonitoredResource) GetType() string {
if m != nil {
return m.Type
}
return ""
}
func (m *MonitoredResource) GetLabels() map[string]string {
if m != nil {
return m.Labels
}
return nil
}
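// Illustrative sketch only: the Go form of the gce_instance example in the
// MonitoredResource comment above; the instance ID and zone are sample values.
func exampleMonitoredResource() *MonitoredResource {
	return &MonitoredResource{
		Type: "gce_instance",
		Labels: map[string]string{
			"instance_id": "12345678901234",
			"zone":        "us-central1-a",
		},
	}
}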
func init() {
proto.RegisterType((*MonitoredResourceDescriptor)(nil), "google.api.MonitoredResourceDescriptor")
proto.RegisterType((*MonitoredResource)(nil), "google.api.MonitoredResource")
}
func init() { proto.RegisterFile("google/api/monitored_resource.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 321 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0x4b, 0x4b, 0x3b, 0x31,
0x10, 0x27, 0xdb, 0x07, 0xfc, 0x67, 0xff, 0x88, 0x06, 0x29, 0x4b, 0x7b, 0xa9, 0xf5, 0x52, 0x2f,
0xbb, 0x60, 0x2f, 0x3e, 0x4e, 0xad, 0x8a, 0x08, 0x2a, 0xa5, 0x47, 0x2f, 0x25, 0x6d, 0xc3, 0x12,
0xdc, 0x66, 0x42, 0xb2, 0x15, 0xf6, 0xeb, 0x08, 0x7e, 0x0e, 0xbf, 0x96, 0x47, 0xc9, 0xa3, 0x76,
0xa5, 0xde, 0x26, 0xbf, 0xf9, 0x3d, 0x66, 0x32, 0x70, 0x9a, 0x23, 0xe6, 0x05, 0xcf, 0x98, 0x12,
0xd9, 0x1a, 0xa5, 0x28, 0x51, 0xf3, 0xd5, 0x5c, 0x73, 0x83, 0x1b, 0xbd, 0xe4, 0xa9, 0xd2, 0x58,
0x22, 0x05, 0x4f, 0x4a, 0x99, 0x12, 0xdd, 0x4e, 0x4d, 0x50, 0xb0, 0x05, 0x2f, 0x3c, 0x67, 0xf0,
0x49, 0xa0, 0xf7, 0xb4, 0x35, 0x98, 0x05, 0xfd, 0x2d, 0x37, 0x4b, 0x2d, 0x54, 0x89, 0x9a, 0x52,
0x68, 0x4a, 0xb6, 0xe6, 0x49, 0xab, 0x4f, 0x86, 0xff, 0x66, 0xae, 0xb6, 0x58, 0x59, 0x29, 0x9e,
0x10, 0x8f, 0xd9, 0x9a, 0x9e, 0xc0, 0xff, 0x95, 0x30, 0xaa, 0x60, 0xd5, 0xdc, 0xf1, 0x23, 0xd7,
0x8b, 0x03, 0xf6, 0x6c, 0x65, 0x7d, 0x88, 0x57, 0xc1, 0x58, 0xa0, 0x4c, 0x1a, 0x81, 0xb1, 0x83,
0xe8, 0x08, 0xda, 0x6e, 0x36, 0x93, 0x34, 0xfb, 0x8d, 0x61, 0x7c, 0xde, 0x4b, 0x77, 0x1b, 0xa4,
0x8f, 0xb6, 0xb3, 0x9b, 0x6c, 0x16, 0xa8, 0x83, 0x0f, 0x02, 0x47, 0x7b, 0x1b, 0xfc, 0x39, 0xe3,
0xf8, 0xc7, 0x3e, 0x72, 0xf6, 0x67, 0x75, 0xfb, 0x3d, 0x0b, 0x1f, 0x68, 0xee, 0x64, 0xa9, 0xab,
0x6d, 0x58, 0xf7, 0x12, 0xe2, 0x1a, 0x4c, 0x0f, 0xa1, 0xf1, 0xca, 0xab, 0x10, 0x62, 0x4b, 0x7a,
0x0c, 0xad, 0x37, 0x56, 0x6c, 0xb6, 0x1f, 0xe0, 0x1f, 0x57, 0xd1, 0x05, 0x99, 0x54, 0x70, 0xb0,
0xc4, 0x75, 0x2d, 0x72, 0xd2, 0xd9, 0xcb, 0x9c, 0xda, 0x9b, 0x4c, 0xc9, 0xcb, 0x4d, 0x60, 0xe5,
0x58, 0x30, 0x99, 0xa7, 0xa8, 0xf3, 0x2c, 0xe7, 0xd2, 0x5d, 0x2c, 0xf3, 0x2d, 0xa6, 0x84, 0xf9,
0x7d, 0x7d, 0xcd, 0xcd, 0x75, 0xfd, 0xf1, 0x45, 0xc8, 0x7b, 0xd4, 0xbc, 0x1f, 0x4f, 0x1f, 0x16,
0x6d, 0xa7, 0x1c, 0x7d, 0x07, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xfb, 0xfb, 0x11, 0x36, 0x02, 0x00,
0x00,
}


@ -0,0 +1,381 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/auth.proto
/*
Package serviceconfig is a generated protocol buffer package.
It is generated from these files:
google/api/auth.proto
google/api/backend.proto
google/api/billing.proto
google/api/consumer.proto
google/api/context.proto
google/api/control.proto
google/api/documentation.proto
google/api/endpoint.proto
google/api/log.proto
google/api/logging.proto
google/api/monitoring.proto
google/api/quota.proto
google/api/service.proto
google/api/source_info.proto
google/api/system_parameter.proto
google/api/usage.proto
It has these top-level messages:
Authentication
AuthenticationRule
AuthProvider
OAuthRequirements
AuthRequirement
Backend
BackendRule
Billing
BillingStatusRule
ProjectProperties
Property
Context
ContextRule
Control
Documentation
DocumentationRule
Page
Endpoint
LogDescriptor
Logging
Monitoring
Quota
MetricRule
QuotaLimit
Service
SourceInfo
SystemParameters
SystemParameterRule
SystemParameter
Usage
UsageRule
*/
package serviceconfig
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// `Authentication` defines the authentication configuration for an API.
//
// Example for an API targeted for external use:
//
// name: calendar.googleapis.com
// authentication:
// providers:
// - id: google_calendar_auth
// jwks_uri: https://www.googleapis.com/oauth2/v1/certs
// issuer: https://securetoken.google.com
// rules:
// - selector: "*"
// requirements:
// provider_id: google_calendar_auth
type Authentication struct {
// A list of authentication rules that apply to individual API methods.
//
// **NOTE:** All service configuration rules follow "last one wins" order.
Rules []*AuthenticationRule `protobuf:"bytes,3,rep,name=rules" json:"rules,omitempty"`
// Defines a set of authentication providers that a service supports.
Providers []*AuthProvider `protobuf:"bytes,4,rep,name=providers" json:"providers,omitempty"`
}
func (m *Authentication) Reset() { *m = Authentication{} }
func (m *Authentication) String() string { return proto.CompactTextString(m) }
func (*Authentication) ProtoMessage() {}
func (*Authentication) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Authentication) GetRules() []*AuthenticationRule {
if m != nil {
return m.Rules
}
return nil
}
func (m *Authentication) GetProviders() []*AuthProvider {
if m != nil {
return m.Providers
}
return nil
}
// Authentication rules for the service.
//
// By default, if a method has any authentication requirements, every request
// must include a valid credential matching one of the requirements.
// It's an error to include more than one kind of credential in a single
// request.
//
// If a method doesn't have any auth requirements, request credentials will be
// ignored.
type AuthenticationRule struct {
// Selects the methods to which this rule applies.
//
// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
// The requirements for OAuth credentials.
Oauth *OAuthRequirements `protobuf:"bytes,2,opt,name=oauth" json:"oauth,omitempty"`
// Whether to allow requests without a credential. The credential can be
// an OAuth token, Google cookies (first-party auth) or EndUserCreds.
//
// For requests without credentials, if the service control environment is
// specified, each incoming request **must** be associated with a service
// consumer. This can be done by passing an API key that belongs to a consumer
// project.
AllowWithoutCredential bool `protobuf:"varint,5,opt,name=allow_without_credential,json=allowWithoutCredential" json:"allow_without_credential,omitempty"`
// Requirements for additional authentication providers.
Requirements []*AuthRequirement `protobuf:"bytes,7,rep,name=requirements" json:"requirements,omitempty"`
}
func (m *AuthenticationRule) Reset() { *m = AuthenticationRule{} }
func (m *AuthenticationRule) String() string { return proto.CompactTextString(m) }
func (*AuthenticationRule) ProtoMessage() {}
func (*AuthenticationRule) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *AuthenticationRule) GetSelector() string {
if m != nil {
return m.Selector
}
return ""
}
func (m *AuthenticationRule) GetOauth() *OAuthRequirements {
if m != nil {
return m.Oauth
}
return nil
}
func (m *AuthenticationRule) GetAllowWithoutCredential() bool {
if m != nil {
return m.AllowWithoutCredential
}
return false
}
func (m *AuthenticationRule) GetRequirements() []*AuthRequirement {
if m != nil {
return m.Requirements
}
return nil
}
// Configuration for an authentication provider, including support for
// [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).
type AuthProvider struct {
// The unique identifier of the auth provider. It will be referred to by
// `AuthRequirement.provider_id`.
//
// Example: "bookstore_auth".
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
// Identifies the principal that issued the JWT. See
// https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.1
// Usually a URL or an email address.
//
// Example: https://securetoken.google.com
// Example: 1234567-compute@developer.gserviceaccount.com
Issuer string `protobuf:"bytes,2,opt,name=issuer" json:"issuer,omitempty"`
// URL of the provider's public key set to validate signature of the JWT. See
// [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).
// Optional if the key set document:
// - can be retrieved from
//   [OpenID Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html)
// of the issuer.
// - can be inferred from the email domain of the issuer (e.g. a Google service account).
//
// Example: https://www.googleapis.com/oauth2/v1/certs
JwksUri string `protobuf:"bytes,3,opt,name=jwks_uri,json=jwksUri" json:"jwks_uri,omitempty"`
// The list of JWT
// [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3)
// that are allowed to access. A JWT containing any of these audiences will
// be accepted. When this setting is absent, only JWTs with audience
// "https://[Service_name][google.api.Service.name]/[API_name][google.protobuf.Api.name]"
// will be accepted. For example, if no audiences are in the setting,
// LibraryService API will only accept JWTs with the following audience
// "https://library-example.googleapis.com/google.example.library.v1.LibraryService".
//
// Example:
//
// audiences: bookstore_android.apps.googleusercontent.com,
// bookstore_web.apps.googleusercontent.com
Audiences string `protobuf:"bytes,4,opt,name=audiences" json:"audiences,omitempty"`
}
func (m *AuthProvider) Reset() { *m = AuthProvider{} }
func (m *AuthProvider) String() string { return proto.CompactTextString(m) }
func (*AuthProvider) ProtoMessage() {}
func (*AuthProvider) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *AuthProvider) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *AuthProvider) GetIssuer() string {
if m != nil {
return m.Issuer
}
return ""
}
func (m *AuthProvider) GetJwksUri() string {
if m != nil {
return m.JwksUri
}
return ""
}
func (m *AuthProvider) GetAudiences() string {
if m != nil {
return m.Audiences
}
return ""
}
// OAuth scopes are a way to define data and permissions on data. For example,
// there are scopes defined for "Read-only access to Google Calendar" and
// "Access to Cloud Platform". Users can consent to a scope for an application,
// giving it permission to access that data on their behalf.
//
// OAuth scope specifications should be fairly coarse grained; a user will need
// to see and understand the text description of what your scope means.
//
// In most cases: use one or at most two OAuth scopes for an entire family of
// products. If your product has multiple APIs, you should probably be sharing
// the OAuth scope across all of those APIs.
//
// When you need finer grained OAuth consent screens: talk with your product
// management about how developers will use them in practice.
//
// Please note that even though each of the canonical scopes is enough for a
// request to be accepted and passed to the backend, a request can still fail
// due to the backend requiring additional scopes or permissions.
type OAuthRequirements struct {
// The list of publicly documented OAuth scopes that are allowed access. An
// OAuth token containing any of these scopes will be accepted.
//
// Example:
//
// canonical_scopes: https://www.googleapis.com/auth/calendar,
// https://www.googleapis.com/auth/calendar.read
CanonicalScopes string `protobuf:"bytes,1,opt,name=canonical_scopes,json=canonicalScopes" json:"canonical_scopes,omitempty"`
}
func (m *OAuthRequirements) Reset() { *m = OAuthRequirements{} }
func (m *OAuthRequirements) String() string { return proto.CompactTextString(m) }
func (*OAuthRequirements) ProtoMessage() {}
func (*OAuthRequirements) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *OAuthRequirements) GetCanonicalScopes() string {
if m != nil {
return m.CanonicalScopes
}
return ""
}
// User-defined authentication requirements, including support for
// [JSON Web Token (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).
type AuthRequirement struct {
// [id][google.api.AuthProvider.id] from authentication provider.
//
// Example:
//
// provider_id: bookstore_auth
ProviderId string `protobuf:"bytes,1,opt,name=provider_id,json=providerId" json:"provider_id,omitempty"`
// NOTE: This will be deprecated soon, once AuthProvider.audiences is
// implemented and accepted in all the runtime components.
//
// The list of JWT
// [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3)
// that are allowed to access. A JWT containing any of these audiences will
// be accepted. When this setting is absent, only JWTs with audience
// "https://[Service_name][google.api.Service.name]/[API_name][google.protobuf.Api.name]"
// will be accepted. For example, if no audiences are in the setting,
// LibraryService API will only accept JWTs with the following audience
// "https://library-example.googleapis.com/google.example.library.v1.LibraryService".
//
// Example:
//
// audiences: bookstore_android.apps.googleusercontent.com,
// bookstore_web.apps.googleusercontent.com
Audiences string `protobuf:"bytes,2,opt,name=audiences" json:"audiences,omitempty"`
}
func (m *AuthRequirement) Reset() { *m = AuthRequirement{} }
func (m *AuthRequirement) String() string { return proto.CompactTextString(m) }
func (*AuthRequirement) ProtoMessage() {}
func (*AuthRequirement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *AuthRequirement) GetProviderId() string {
if m != nil {
return m.ProviderId
}
return ""
}
func (m *AuthRequirement) GetAudiences() string {
if m != nil {
return m.Audiences
}
return ""
}
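// Illustrative sketch only: the Go equivalent of the calendar.googleapis.com
// YAML example in the Authentication comment above. The provider ID, issuer,
// and JWKS URI are the sample values from that comment, not real credentials.
func exampleAuthentication() *Authentication {
	return &Authentication{
		Providers: []*AuthProvider{
			{
				Id:      "google_calendar_auth",
				Issuer:  "https://securetoken.google.com",
				JwksUri: "https://www.googleapis.com/oauth2/v1/certs",
			},
		},
		Rules: []*AuthenticationRule{
			{
				Selector: "*",
				Requirements: []*AuthRequirement{
					{ProviderId: "google_calendar_auth"},
				},
			},
		},
	}
}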
func init() {
proto.RegisterType((*Authentication)(nil), "google.api.Authentication")
proto.RegisterType((*AuthenticationRule)(nil), "google.api.AuthenticationRule")
proto.RegisterType((*AuthProvider)(nil), "google.api.AuthProvider")
proto.RegisterType((*OAuthRequirements)(nil), "google.api.OAuthRequirements")
proto.RegisterType((*AuthRequirement)(nil), "google.api.AuthRequirement")
}
func init() { proto.RegisterFile("google/api/auth.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 437 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x52, 0xcd, 0x6e, 0xd3, 0x40,
0x10, 0x96, 0x9d, 0xa6, 0x8d, 0x27, 0x55, 0x0a, 0x2b, 0x51, 0x99, 0x52, 0x20, 0xf2, 0x29, 0x5c,
0x1c, 0xa9, 0x45, 0x08, 0x09, 0x09, 0xd4, 0x22, 0x84, 0x7a, 0x22, 0x32, 0x42, 0x48, 0x5c, 0xac,
0x65, 0x3d, 0x38, 0x4b, 0xdd, 0x1d, 0xb3, 0x3f, 0xcd, 0x8d, 0x87, 0xe1, 0xc9, 0x78, 0x94, 0xca,
0x6b, 0x37, 0x71, 0xd2, 0xe3, 0x7c, 0x3f, 0x33, 0xf3, 0xcd, 0x2e, 0x3c, 0x29, 0x89, 0xca, 0x0a,
0xe7, 0xbc, 0x96, 0x73, 0xee, 0xec, 0x32, 0xad, 0x35, 0x59, 0x62, 0xd0, 0xc2, 0x29, 0xaf, 0xe5,
0xc9, 0x69, 0x5f, 0xa2, 0x14, 0x59, 0x6e, 0x25, 0x29, 0xd3, 0x2a, 0x93, 0xbf, 0x30, 0xb9, 0x70,
0x76, 0x89, 0xca, 0x4a, 0xe1, 0x09, 0xf6, 0x1a, 0x86, 0xda, 0x55, 0x68, 0xe2, 0xc1, 0x74, 0x30,
0x1b, 0x9f, 0xbd, 0x48, 0x37, 0xbd, 0xd2, 0x6d, 0x69, 0xe6, 0x2a, 0xcc, 0x5a, 0x31, 0x7b, 0x03,
0x51, 0xad, 0xe9, 0x56, 0x16, 0xa8, 0x4d, 0xbc, 0xe7, 0x9d, 0xf1, 0xae, 0x73, 0xd1, 0x09, 0xb2,
0x8d, 0x34, 0xf9, 0x1f, 0x00, 0x7b, 0xd8, 0x95, 0x9d, 0xc0, 0xc8, 0x60, 0x85, 0xc2, 0x92, 0x8e,
0x83, 0x69, 0x30, 0x8b, 0xb2, 0x75, 0xcd, 0xce, 0x61, 0x48, 0x4d, 0xd6, 0x38, 0x9c, 0x06, 0xb3,
0xf1, 0xd9, 0xf3, 0xfe, 0x98, 0x2f, 0x4d, 0xaf, 0x0c, 0xff, 0x38, 0xa9, 0xf1, 0x06, 0x95, 0x35,
0x59, 0xab, 0x65, 0x6f, 0x21, 0xe6, 0x55, 0x45, 0xab, 0x7c, 0x25, 0xed, 0x92, 0x9c, 0xcd, 0x85,
0xc6, 0xa2, 0x19, 0xca, 0xab, 0x78, 0x38, 0x0d, 0x66, 0xa3, 0xec, 0xd8, 0xf3, 0xdf, 0x5b, 0xfa,
0xe3, 0x9a, 0x65, 0x1f, 0xe0, 0x50, 0xf7, 0x1a, 0xc6, 0x07, 0x3e, 0xdc, 0xb3, 0xdd, 0x70, 0xbd,
0xa1, 0xd9, 0x96, 0x21, 0x21, 0x38, 0xec, 0xa7, 0x67, 0x13, 0x08, 0x65, 0xd1, 0xa5, 0x0a, 0x65,
0xc1, 0x8e, 0x61, 0x5f, 0x1a, 0xe3, 0x50, 0xfb, 0x40, 0x51, 0xd6, 0x55, 0xec, 0x29, 0x8c, 0x7e,
0xaf, 0xae, 0x4d, 0xee, 0xb4, 0x8c, 0x07, 0x9e, 0x39, 0x68, 0xea, 0x6f, 0x5a, 0xb2, 0x53, 0x88,
0xb8, 0x2b, 0x24, 0x2a, 0x81, 0xcd, 0xb5, 0x1b, 0x6e, 0x03, 0x24, 0xef, 0xe1, 0xf1, 0x83, 0x3b,
0xb0, 0x57, 0xf0, 0x48, 0x70, 0x45, 0x4a, 0x0a, 0x5e, 0xe5, 0x46, 0x50, 0x8d, 0xa6, 0xdb, 0xe1,
0x68, 0x8d, 0x7f, 0xf5, 0x70, 0xb2, 0x80, 0xa3, 0x1d, 0x3b, 0x7b, 0x09, 0xe3, 0xfb, 0x37, 0xcb,
0xd7, 0xcb, 0xc3, 0x3d, 0x74, 0x55, 0x6c, 0x6f, 0x14, 0xee, 0x6c, 0x74, 0x79, 0x0d, 0x13, 0x41,
0x37, 0xbd, 0x93, 0x5d, 0x46, 0xdd, 0x49, 0x2c, 0x2d, 0x82, 0x1f, 0x9f, 0x3a, 0xa2, 0xa4, 0x8a,
0xab, 0x32, 0x25, 0x5d, 0xce, 0x4b, 0x54, 0xfe, 0x83, 0xce, 0x5b, 0x8a, 0xd7, 0xd2, 0xf8, 0x1f,
0x6c, 0x50, 0xdf, 0x4a, 0x81, 0x82, 0xd4, 0x2f, 0x59, 0xbe, 0xdb, 0xaa, 0xfe, 0x85, 0x7b, 0x9f,
0x2f, 0x16, 0x57, 0x3f, 0xf7, 0xbd, 0xf1, 0xfc, 0x2e, 0x00, 0x00, 0xff, 0xff, 0xb9, 0x6d, 0xc6,
0x5e, 0x1c, 0x03, 0x00, 0x00,
}


@ -0,0 +1,98 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/backend.proto
package serviceconfig
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// `Backend` defines the backend configuration for a service.
type Backend struct {
// A list of API backend rules that apply to individual API methods.
//
// **NOTE:** All service configuration rules follow "last one wins" order.
Rules []*BackendRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"`
}
func (m *Backend) Reset() { *m = Backend{} }
func (m *Backend) String() string { return proto.CompactTextString(m) }
func (*Backend) ProtoMessage() {}
func (*Backend) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
func (m *Backend) GetRules() []*BackendRule {
if m != nil {
return m.Rules
}
return nil
}
// A backend rule provides configuration for an individual API element.
type BackendRule struct {
// Selects the methods to which this rule applies.
//
// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
// The address of the API backend.
Address string `protobuf:"bytes,2,opt,name=address" json:"address,omitempty"`
// The number of seconds to wait for a response from a request. The
// default depends on the deployment context.
Deadline float64 `protobuf:"fixed64,3,opt,name=deadline" json:"deadline,omitempty"`
}
func (m *BackendRule) Reset() { *m = BackendRule{} }
func (m *BackendRule) String() string { return proto.CompactTextString(m) }
func (*BackendRule) ProtoMessage() {}
func (*BackendRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
func (m *BackendRule) GetSelector() string {
if m != nil {
return m.Selector
}
return ""
}
func (m *BackendRule) GetAddress() string {
if m != nil {
return m.Address
}
return ""
}
func (m *BackendRule) GetDeadline() float64 {
if m != nil {
return m.Deadline
}
return 0
}
func init() {
proto.RegisterType((*Backend)(nil), "google.api.Backend")
proto.RegisterType((*BackendRule)(nil), "google.api.BackendRule")
}
func init() { proto.RegisterFile("google/api/backend.proto", fileDescriptor1) }
var fileDescriptor1 = []byte{
// 227 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0x31, 0x4f, 0x03, 0x21,
0x18, 0x86, 0x43, 0xab, 0x56, 0xbf, 0x1a, 0x87, 0x5b, 0x24, 0x4e, 0x97, 0x4e, 0xb7, 0xc8, 0x25,
0xba, 0x98, 0x38, 0x79, 0x89, 0x31, 0x6e, 0x17, 0x46, 0x17, 0x43, 0xe1, 0x93, 0x10, 0x91, 0x8f,
0x40, 0xeb, 0x0f, 0xf2, 0x97, 0x9a, 0xc2, 0x59, 0xdb, 0xf1, 0xe1, 0x79, 0xdf, 0xc0, 0x0b, 0x70,
0x4b, 0x64, 0x3d, 0xf6, 0x2a, 0xba, 0x7e, 0xad, 0xf4, 0x27, 0x06, 0x23, 0x62, 0xa2, 0x0d, 0x35,
0x50, 0x8d, 0x50, 0xd1, 0xad, 0x1e, 0x60, 0x31, 0x54, 0xd9, 0xdc, 0xc2, 0x69, 0xda, 0x7a, 0xcc,
0x9c, 0xb5, 0xf3, 0x6e, 0x79, 0x77, 0x2d, 0xfe, 0x63, 0x62, 0xca, 0xc8, 0xad, 0x47, 0x59, 0x53,
0xab, 0x77, 0x58, 0x1e, 0x9c, 0x36, 0x37, 0x70, 0x9e, 0xd1, 0xa3, 0xde, 0x50, 0xe2, 0xac, 0x65,
0xdd, 0x85, 0xdc, 0x73, 0xc3, 0x61, 0xa1, 0x8c, 0x49, 0x98, 0x33, 0x9f, 0x15, 0xf5, 0x87, 0xbb,
0x96, 0x41, 0x65, 0xbc, 0x0b, 0xc8, 0xe7, 0x2d, 0xeb, 0x98, 0xdc, 0xf3, 0x10, 0xe0, 0x4a, 0xd3,
0xd7, 0xc1, 0x2b, 0x86, 0xcb, 0xe9, 0xc2, 0x71, 0x37, 0x63, 0x64, 0x6f, 0xcf, 0x93, 0xb3, 0xe4,
0x55, 0xb0, 0x82, 0x92, 0xed, 0x2d, 0x86, 0x32, 0xb2, 0xaf, 0x4a, 0x45, 0x97, 0xcb, 0x0f, 0x64,
0x4c, 0xdf, 0x4e, 0xa3, 0xa6, 0xf0, 0xe1, 0xec, 0xe3, 0x11, 0xfd, 0xcc, 0x4e, 0x5e, 0x9e, 0xc6,
0xd7, 0xf5, 0x59, 0x29, 0xde, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x7e, 0x93, 0x9e, 0x00, 0x39,
0x01, 0x00, 0x00,
}
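// Illustrative sketch, not part of the generated code: a hedged example of how
// the Backend and BackendRule messages above might be populated in Go. The
// selector, address and deadline values are made up for demonstration.
func exampleBackendConfig() {
	b := &Backend{
		Rules: []*BackendRule{
			{
				Selector: "google.example.library.v1.LibraryService.GetBook",
				Address:  "backend.example.com",
				Deadline: 30, // seconds to wait for a response
			},
		},
	}
	// Generated getters are nil-safe: calling one on a nil message returns the
	// zero value instead of panicking.
	var missing *BackendRule
	fmt.Println(len(b.GetRules()), missing.GetAddress())
}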

View File

@ -0,0 +1,150 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/billing.proto
package serviceconfig
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import _ "google.golang.org/genproto/googleapis/api/metric"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Billing related configuration of the service.
//
// The following example shows how to configure metrics for billing:
//
// metrics:
// - name: library.googleapis.com/read_calls
// metric_kind: DELTA
// value_type: INT64
// - name: library.googleapis.com/write_calls
// metric_kind: DELTA
// value_type: INT64
// billing:
// metrics:
// - library.googleapis.com/read_calls
// - library.googleapis.com/write_calls
//
// The next example shows how to enable billing status check and customize the
// check behavior. It makes sure billing status check is included in the `Check`
// method of [Service Control API](https://cloud.google.com/service-control/).
// In the example, the "google.storage.Get" method can be served when the billing
// status is either `current` or `delinquent`, while the "google.storage.Write"
// method can be served only when the billing status is `current`:
//
// billing:
// rules:
// - selector: google.storage.Get
// allowed_statuses:
// - current
// - delinquent
// - selector: google.storage.Write
// allowed_statuses: current
//
// Most services should allow only the `current` status when serving requests.
// In addition, services can choose to allow both `current` and `delinquent`
// statuses when serving read-only requests to resources. If there is no
// matching selector for an operation, no billing status check will be performed.
//
type Billing struct {
// Names of the metrics to report to billing. Each name must
// be defined in [Service.metrics][google.api.Service.metrics] section.
Metrics []string `protobuf:"bytes,1,rep,name=metrics" json:"metrics,omitempty"`
// A list of billing status rules for configuring billing status check.
Rules []*BillingStatusRule `protobuf:"bytes,5,rep,name=rules" json:"rules,omitempty"`
}
func (m *Billing) Reset() { *m = Billing{} }
func (m *Billing) String() string { return proto.CompactTextString(m) }
func (*Billing) ProtoMessage() {}
func (*Billing) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} }
func (m *Billing) GetMetrics() []string {
if m != nil {
return m.Metrics
}
return nil
}
func (m *Billing) GetRules() []*BillingStatusRule {
if m != nil {
return m.Rules
}
return nil
}
// Defines the billing status requirements for operations.
//
// When used with
// [Service Control API](https://cloud.google.com/service-control/), the
// following statuses are supported:
//
// - **current**: the associated billing account is up to date and capable of
// paying for resource usages.
// - **delinquent**: the associated billing account has a correctable problem,
// such as late payment.
//
// Most services should allow only the `current` status when serving requests.
// In addition, services can choose to allow both `current` and `delinquent`
// statuses when serving read-only requests to resources. If the list of
// allowed_statuses is empty, there is no billing requirement.
//
type BillingStatusRule struct {
// Selects the operation names to which this rule applies.
// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
// Allowed billing statuses. The billing status check passes if the actual
// billing status matches any of the provided values here.
AllowedStatuses []string `protobuf:"bytes,2,rep,name=allowed_statuses,json=allowedStatuses" json:"allowed_statuses,omitempty"`
}
func (m *BillingStatusRule) Reset() { *m = BillingStatusRule{} }
func (m *BillingStatusRule) String() string { return proto.CompactTextString(m) }
func (*BillingStatusRule) ProtoMessage() {}
func (*BillingStatusRule) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} }
func (m *BillingStatusRule) GetSelector() string {
if m != nil {
return m.Selector
}
return ""
}
func (m *BillingStatusRule) GetAllowedStatuses() []string {
if m != nil {
return m.AllowedStatuses
}
return nil
}
func init() {
proto.RegisterType((*Billing)(nil), "google.api.Billing")
proto.RegisterType((*BillingStatusRule)(nil), "google.api.BillingStatusRule")
}
func init() { proto.RegisterFile("google/api/billing.proto", fileDescriptor2) }
var fileDescriptor2 = []byte{
// 253 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x90, 0x4f, 0x4b, 0x03, 0x31,
0x10, 0xc5, 0x59, 0x4b, 0xad, 0x8d, 0xe2, 0x9f, 0xbd, 0x18, 0x16, 0x85, 0xa5, 0xa7, 0xf5, 0x92,
0x05, 0x7b, 0xf4, 0xb6, 0xe0, 0xbd, 0x6c, 0x2f, 0xd2, 0x8b, 0xa4, 0x71, 0x0c, 0x81, 0x34, 0xb3,
0x64, 0xb2, 0xfa, 0xf5, 0xc5, 0x24, 0xea, 0x4a, 0x8f, 0x6f, 0x7e, 0xef, 0x65, 0xe6, 0x85, 0x71,
0x8d, 0xa8, 0x2d, 0xb4, 0x72, 0x30, 0xed, 0xde, 0x58, 0x6b, 0x9c, 0x16, 0x83, 0xc7, 0x80, 0x25,
0x4b, 0x44, 0xc8, 0xc1, 0x54, 0x77, 0x13, 0x97, 0x74, 0x0e, 0x83, 0x0c, 0x06, 0x1d, 0x25, 0x67,
0x75, 0x3b, 0xa1, 0x07, 0x08, 0xde, 0xa8, 0x04, 0x56, 0x2f, 0x6c, 0xd1, 0xa5, 0x37, 0x4b, 0xce,
0x16, 0x09, 0x11, 0x2f, 0xea, 0x59, 0xb3, 0xec, 0x7f, 0x64, 0xb9, 0x66, 0x73, 0x3f, 0x5a, 0x20,
0x3e, 0xaf, 0x67, 0xcd, 0xf9, 0xe3, 0xbd, 0xf8, 0xdb, 0x2b, 0x72, 0x7a, 0x1b, 0x64, 0x18, 0xa9,
0x1f, 0x2d, 0xf4, 0xc9, 0xbb, 0xda, 0xb1, 0x9b, 0x23, 0x56, 0x56, 0xec, 0x8c, 0xc0, 0x82, 0x0a,
0xe8, 0x79, 0x51, 0x17, 0xcd, 0xb2, 0xff, 0xd5, 0xe5, 0x03, 0xbb, 0x96, 0xd6, 0xe2, 0x27, 0xbc,
0xbd, 0x52, 0x4c, 0x00, 0xf1, 0x93, 0x78, 0xc8, 0x55, 0x9e, 0x6f, 0xf3, 0xb8, 0xd3, 0xec, 0x52,
0xe1, 0x61, 0x72, 0x46, 0x77, 0x91, 0x77, 0x6d, 0xbe, 0x5b, 0x6d, 0x8a, 0xdd, 0x73, 0x66, 0x1a,
0xad, 0x74, 0x5a, 0xa0, 0xd7, 0xad, 0x06, 0x17, 0x3b, 0xb7, 0x09, 0xc9, 0xc1, 0x50, 0xfc, 0x0f,
0x02, 0xff, 0x61, 0x14, 0x28, 0x74, 0xef, 0x46, 0x3f, 0xfd, 0x53, 0xfb, 0xd3, 0x98, 0x58, 0x7f,
0x05, 0x00, 0x00, 0xff, 0xff, 0xd1, 0x90, 0x2d, 0x32, 0x84, 0x01, 0x00, 0x00,
}
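// Illustrative sketch, not part of the generated code: builds the Billing
// message that the YAML example in the comment above describes, then
// round-trips it through the wire format. Error handling is elided.
func exampleBillingConfig() {
	in := &Billing{
		Metrics: []string{
			"library.googleapis.com/read_calls",
			"library.googleapis.com/write_calls",
		},
		Rules: []*BillingStatusRule{
			{Selector: "google.storage.Get", AllowedStatuses: []string{"current", "delinquent"}},
			{Selector: "google.storage.Write", AllowedStatuses: []string{"current"}},
		},
	}
	buf, _ := proto.Marshal(in)
	out := &Billing{}
	_ = proto.Unmarshal(buf, out)
	fmt.Println(proto.Equal(in, out)) // expected to print true
}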

View File

@ -0,0 +1,158 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/consumer.proto
package serviceconfig
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Supported data type of the property values
type Property_PropertyType int32
const (
// The type is unspecified, and will result in an error.
Property_UNSPECIFIED Property_PropertyType = 0
// The type is `int64`.
Property_INT64 Property_PropertyType = 1
// The type is `bool`.
Property_BOOL Property_PropertyType = 2
// The type is `string`.
Property_STRING Property_PropertyType = 3
// The type is `double`.
Property_DOUBLE Property_PropertyType = 4
)
var Property_PropertyType_name = map[int32]string{
0: "UNSPECIFIED",
1: "INT64",
2: "BOOL",
3: "STRING",
4: "DOUBLE",
}
var Property_PropertyType_value = map[string]int32{
"UNSPECIFIED": 0,
"INT64": 1,
"BOOL": 2,
"STRING": 3,
"DOUBLE": 4,
}
func (x Property_PropertyType) String() string {
return proto.EnumName(Property_PropertyType_name, int32(x))
}
func (Property_PropertyType) EnumDescriptor() ([]byte, []int) { return fileDescriptor3, []int{1, 0} }
// A descriptor for defining project properties for a service. One service may
// have many consumer projects, and the service may want to behave differently
// depending on some properties on the project. For example, a project may be
// associated with a school, a business, or a government agency; a business-type
// property on the project may then affect how the service responds to the client.
// This descriptor defines which properties are allowed to be set on a project.
//
// Example:
//
// project_properties:
// properties:
// - name: NO_WATERMARK
// type: BOOL
// description: Allows usage of the API without watermarks.
// - name: EXTENDED_TILE_CACHE_PERIOD
// type: INT64
type ProjectProperties struct {
// List of per consumer project-specific properties.
Properties []*Property `protobuf:"bytes,1,rep,name=properties" json:"properties,omitempty"`
}
func (m *ProjectProperties) Reset() { *m = ProjectProperties{} }
func (m *ProjectProperties) String() string { return proto.CompactTextString(m) }
func (*ProjectProperties) ProtoMessage() {}
func (*ProjectProperties) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} }
func (m *ProjectProperties) GetProperties() []*Property {
if m != nil {
return m.Properties
}
return nil
}
// Defines project properties.
//
// API services can define properties that can be assigned to consumer projects
// so that backends can perform response customization without having to make
// additional calls or maintain additional storage. For example, the Maps API
// defines properties that control the map tile cache period, or whether to embed
// a watermark in a result.
//
// These values can be set via the API producer console. Only API providers can
// define and set these properties.
type Property struct {
// The name of the property (a.k.a key).
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// The type of this property.
Type Property_PropertyType `protobuf:"varint,2,opt,name=type,enum=google.api.Property_PropertyType" json:"type,omitempty"`
// The description of the property
Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
}
func (m *Property) Reset() { *m = Property{} }
func (m *Property) String() string { return proto.CompactTextString(m) }
func (*Property) ProtoMessage() {}
func (*Property) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} }
func (m *Property) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Property) GetType() Property_PropertyType {
if m != nil {
return m.Type
}
return Property_UNSPECIFIED
}
func (m *Property) GetDescription() string {
if m != nil {
return m.Description
}
return ""
}
func init() {
proto.RegisterType((*ProjectProperties)(nil), "google.api.ProjectProperties")
proto.RegisterType((*Property)(nil), "google.api.Property")
proto.RegisterEnum("google.api.Property_PropertyType", Property_PropertyType_name, Property_PropertyType_value)
}
func init() { proto.RegisterFile("google/api/consumer.proto", fileDescriptor3) }
var fileDescriptor3 = []byte{
// 299 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4f, 0x4f, 0xf2, 0x40,
0x10, 0xc6, 0xdf, 0x85, 0xbe, 0x04, 0x06, 0xc5, 0xba, 0xf1, 0x50, 0x6f, 0x95, 0x13, 0xa7, 0x36,
0x41, 0xf4, 0xe2, 0xad, 0x50, 0x4d, 0x13, 0x02, 0x4d, 0x81, 0x8b, 0xb7, 0x5a, 0xc7, 0x75, 0x0d,
0xec, 0x6c, 0xb6, 0xd5, 0x84, 0x0f, 0xe8, 0xf7, 0x32, 0x2c, 0x88, 0x35, 0xf1, 0xf6, 0xcc, 0x3e,
0x7f, 0xb2, 0xf9, 0xc1, 0xa5, 0x20, 0x12, 0x6b, 0x0c, 0x73, 0x2d, 0xc3, 0x82, 0x54, 0xf9, 0xbe,
0x41, 0x13, 0x68, 0x43, 0x15, 0x71, 0xd8, 0x5b, 0x41, 0xae, 0x65, 0x3f, 0x81, 0xf3, 0xd4, 0xd0,
0x1b, 0x16, 0x55, 0x6a, 0x48, 0xa3, 0xa9, 0x24, 0x96, 0x7c, 0x04, 0xa0, 0x8f, 0x97, 0xc7, 0xfc,
0xe6, 0xa0, 0x3b, 0xbc, 0x08, 0x7e, 0x5a, 0xc1, 0x21, 0xbb, 0xcd, 0x6a, 0xb9, 0xfe, 0x27, 0x83,
0xf6, 0xb7, 0xc1, 0x39, 0x38, 0x2a, 0xdf, 0xa0, 0xc7, 0x7c, 0x36, 0xe8, 0x64, 0x56, 0xf3, 0x1b,
0x70, 0xaa, 0xad, 0x46, 0xaf, 0xe1, 0xb3, 0x41, 0x6f, 0x78, 0xf5, 0xd7, 0xe0, 0x51, 0x2c, 0xb7,
0x1a, 0x33, 0x1b, 0xe7, 0x3e, 0x74, 0x9f, 0xb1, 0x2c, 0x8c, 0xd4, 0x95, 0x24, 0xe5, 0x35, 0xed,
0x62, 0xfd, 0xa9, 0x3f, 0x85, 0x93, 0x7a, 0x8f, 0x9f, 0x41, 0x77, 0x35, 0x5b, 0xa4, 0xf1, 0x38,
0xb9, 0x4f, 0xe2, 0x89, 0xfb, 0x8f, 0x77, 0xe0, 0x7f, 0x32, 0x5b, 0xde, 0x8e, 0x5c, 0xc6, 0xdb,
0xe0, 0x44, 0xf3, 0xf9, 0xd4, 0x6d, 0x70, 0x80, 0xd6, 0x62, 0x99, 0x25, 0xb3, 0x07, 0xb7, 0xb9,
0xd3, 0x93, 0xf9, 0x2a, 0x9a, 0xc6, 0xae, 0x13, 0xbd, 0x42, 0xaf, 0xa0, 0x4d, 0xed, 0x77, 0xd1,
0xe9, 0xf8, 0x00, 0x30, 0xdd, 0xf1, 0x4b, 0xd9, 0x63, 0x7c, 0x30, 0x05, 0xad, 0x73, 0x25, 0x02,
0x32, 0x22, 0x14, 0xa8, 0x2c, 0xdd, 0x70, 0x6f, 0xe5, 0x5a, 0x96, 0x96, 0x7d, 0x89, 0xe6, 0x43,
0x16, 0x58, 0x90, 0x7a, 0x91, 0xe2, 0xee, 0xd7, 0xf5, 0xd4, 0xb2, 0x8d, 0xeb, 0xaf, 0x00, 0x00,
0x00, 0xff, 0xff, 0xb7, 0xa4, 0x04, 0x2c, 0xac, 0x01, 0x00, 0x00,
}
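// Illustrative sketch, not part of the generated code: defines the two project
// properties from the ProjectProperties example above and prints each property
// with its type; the enum String() method resolves names via
// Property_PropertyType_name.
func exampleProjectProperties() {
	pp := &ProjectProperties{
		Properties: []*Property{
			{Name: "NO_WATERMARK", Type: Property_BOOL, Description: "Allows usage of the API without watermarks."},
			{Name: "EXTENDED_TILE_CACHE_PERIOD", Type: Property_INT64},
		},
	}
	for _, p := range pp.GetProperties() {
		fmt.Printf("%s: %s\n", p.GetName(), p.GetType())
	}
}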

View File

@ -0,0 +1,114 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/context.proto
package serviceconfig
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// `Context` defines which contexts an API requests.
//
// Example:
//
// context:
// rules:
// - selector: "*"
// requested:
// - google.rpc.context.ProjectContext
// - google.rpc.context.OriginContext
//
// The above specifies that all methods in the API request
// `google.rpc.context.ProjectContext` and
// `google.rpc.context.OriginContext`.
//
// Available context types are defined in package
// `google.rpc.context`.
type Context struct {
// A list of RPC context rules that apply to individual API methods.
//
// **NOTE:** All service configuration rules follow "last one wins" order.
Rules []*ContextRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"`
}
func (m *Context) Reset() { *m = Context{} }
func (m *Context) String() string { return proto.CompactTextString(m) }
func (*Context) ProtoMessage() {}
func (*Context) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} }
func (m *Context) GetRules() []*ContextRule {
if m != nil {
return m.Rules
}
return nil
}
// A context rule provides information about the context for an individual API
// element.
type ContextRule struct {
// Selects the methods to which this rule applies.
//
// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
// A list of full type names of requested contexts.
Requested []string `protobuf:"bytes,2,rep,name=requested" json:"requested,omitempty"`
// A list of full type names of provided contexts.
Provided []string `protobuf:"bytes,3,rep,name=provided" json:"provided,omitempty"`
}
func (m *ContextRule) Reset() { *m = ContextRule{} }
func (m *ContextRule) String() string { return proto.CompactTextString(m) }
func (*ContextRule) ProtoMessage() {}
func (*ContextRule) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} }
func (m *ContextRule) GetSelector() string {
if m != nil {
return m.Selector
}
return ""
}
func (m *ContextRule) GetRequested() []string {
if m != nil {
return m.Requested
}
return nil
}
func (m *ContextRule) GetProvided() []string {
if m != nil {
return m.Provided
}
return nil
}
func init() {
proto.RegisterType((*Context)(nil), "google.api.Context")
proto.RegisterType((*ContextRule)(nil), "google.api.ContextRule")
}
func init() { proto.RegisterFile("google/api/context.proto", fileDescriptor4) }
var fileDescriptor4 = []byte{
// 231 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0x4f, 0x4b, 0xc4, 0x30,
0x14, 0xc4, 0xe9, 0xd6, 0x7f, 0x7d, 0x2b, 0x1e, 0x7a, 0x31, 0x88, 0x87, 0xb2, 0xa7, 0x5e, 0x4c,
0x41, 0x2f, 0x82, 0x27, 0x57, 0x44, 0xbc, 0x95, 0x1e, 0xbd, 0xc5, 0xf4, 0x19, 0x02, 0x31, 0x2f,
0x26, 0xe9, 0xe2, 0xe7, 0xf1, 0x93, 0xca, 0x26, 0x65, 0xff, 0x1c, 0x67, 0x7e, 0x33, 0x24, 0xf3,
0x80, 0x29, 0x22, 0x65, 0xb0, 0x13, 0x4e, 0x77, 0x92, 0x6c, 0xc4, 0xdf, 0xc8, 0x9d, 0xa7, 0x48,
0x35, 0x64, 0xc2, 0x85, 0xd3, 0xab, 0x47, 0x38, 0x7f, 0xc9, 0xb0, 0xbe, 0x83, 0x53, 0x3f, 0x19,
0x0c, 0xac, 0x68, 0xca, 0x76, 0x79, 0x7f, 0xcd, 0xf7, 0x31, 0x3e, 0x67, 0x86, 0xc9, 0xe0, 0x90,
0x53, 0x2b, 0x09, 0xcb, 0x03, 0xb7, 0xbe, 0x81, 0x8b, 0x80, 0x06, 0x65, 0x24, 0xcf, 0x8a, 0xa6,
0x68, 0xab, 0x61, 0xa7, 0xeb, 0x5b, 0xa8, 0x3c, 0xfe, 0x4c, 0x18, 0x22, 0x8e, 0x6c, 0xd1, 0x94,
0x6d, 0x35, 0xec, 0x8d, 0x6d, 0xd3, 0x79, 0xda, 0xe8, 0x11, 0x47, 0x56, 0x26, 0xb8, 0xd3, 0x6b,
0x0b, 0x57, 0x92, 0xbe, 0x0f, 0x7e, 0xb2, 0xbe, 0x9c, 0x1f, 0xed, 0xb7, 0x53, 0xfa, 0xe2, 0xe3,
0x75, 0x66, 0x8a, 0x8c, 0xb0, 0x8a, 0x93, 0x57, 0x9d, 0x42, 0x9b, 0x86, 0x76, 0x19, 0x09, 0xa7,
0x43, 0xba, 0x42, 0x40, 0xbf, 0xd1, 0x12, 0x25, 0xd9, 0x2f, 0xad, 0x9e, 0x8e, 0xd4, 0xdf, 0xe2,
0xe4, 0xed, 0xb9, 0x7f, 0xff, 0x3c, 0x4b, 0xc5, 0x87, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb5,
0x18, 0x98, 0x7a, 0x3d, 0x01, 0x00, 0x00,
}
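// Illustrative sketch, not part of the generated code: reproduces the context
// rule from the example in the Context comment, requesting two context types
// for every method via the "*" selector, and prints it in proto text form.
func exampleContextConfig() {
	c := &Context{
		Rules: []*ContextRule{
			{
				Selector:  "*",
				Requested: []string{"google.rpc.context.ProjectContext", "google.rpc.context.OriginContext"},
			},
		},
	}
	fmt.Println(proto.MarshalTextString(c))
}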

View File

@ -0,0 +1,55 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/control.proto
package serviceconfig
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Selects and configures the service controller used by the service. The
// service controller handles features like abuse, quota, billing, logging,
// monitoring, etc.
type Control struct {
// The service control environment to use. If empty, no control plane
// feature (like quota and billing) will be enabled.
Environment string `protobuf:"bytes,1,opt,name=environment" json:"environment,omitempty"`
}
func (m *Control) Reset() { *m = Control{} }
func (m *Control) String() string { return proto.CompactTextString(m) }
func (*Control) ProtoMessage() {}
func (*Control) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0} }
func (m *Control) GetEnvironment() string {
if m != nil {
return m.Environment
}
return ""
}
func init() {
proto.RegisterType((*Control)(nil), "google.api.Control")
}
func init() { proto.RegisterFile("google/api/control.proto", fileDescriptor5) }
var fileDescriptor5 = []byte{
// 165 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x48, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xd1, 0x2b,
0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xe8, 0x25, 0x16, 0x64, 0x2a, 0x69, 0x73, 0xb1,
0x3b, 0x43, 0x24, 0x85, 0x14, 0xb8, 0xb8, 0x53, 0xf3, 0xca, 0x32, 0x8b, 0xf2, 0xf3, 0x72, 0x53,
0xf3, 0x4a, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0x90, 0x85, 0x9c, 0xf2, 0xb8, 0xf8, 0x92,
0xf3, 0x73, 0xf5, 0x10, 0xda, 0x9d, 0x78, 0xa0, 0x9a, 0x03, 0x40, 0x06, 0x07, 0x30, 0x46, 0xb9,
0x42, 0xe5, 0xd2, 0xf3, 0x73, 0x12, 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3,
0xc0, 0xd6, 0xea, 0x43, 0xa4, 0x12, 0x0b, 0x32, 0x8b, 0xc1, 0x6e, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb,
0x4c, 0x4e, 0x4d, 0xce, 0xcf, 0x4b, 0xcb, 0x4c, 0xb7, 0x46, 0xe1, 0x2d, 0x62, 0x62, 0x71, 0x77,
0x0c, 0xf0, 0x4c, 0x62, 0x03, 0x6b, 0x34, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x44, 0x6e, 0x78,
0xbd, 0xcb, 0x00, 0x00, 0x00,
}
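// Illustrative sketch, not part of the generated code: an empty Environment
// leaves control-plane features disabled, so a config that wants quota,
// billing and logging points at a service controller environment. The value
// below is only an example, not a required constant.
func exampleControlConfig() {
	ctl := &Control{Environment: "servicecontrol.googleapis.com"}
	fmt.Println(ctl.GetEnvironment() != "") // true: control plane enabled
}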

View File

@ -0,0 +1,267 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/documentation.proto
package serviceconfig
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// `Documentation` provides the information for describing a service.
//
// Example:
// <pre><code>documentation:
// summary: >
// The Google Calendar API gives access
// to most calendar features.
// pages:
// - name: Overview
// content: &#40;== include google/foo/overview.md ==&#41;
// - name: Tutorial
// content: &#40;== include google/foo/tutorial.md ==&#41;
// subpages:
// - name: Java
// content: &#40;== include google/foo/tutorial_java.md ==&#41;
// rules:
// - selector: google.calendar.Calendar.Get
// description: >
// ...
// - selector: google.calendar.Calendar.Put
// description: >
// ...
// </code></pre>
// Documentation is provided in markdown syntax. In addition to
// standard markdown features, definition lists, tables and fenced
// code blocks are supported. Section headers can be provided and are
// interpreted relative to the section nesting of the context where
// a documentation fragment is embedded.
//
// Documentation from the IDL is merged with documentation defined
// via the config at normalization time, where documentation provided
// by config rules overrides IDL-provided documentation.
//
// A number of constructs specific to the API platform are supported
// in documentation text.
//
// In order to reference a proto element, the following
// notation can be used:
// <pre><code>&#91;fully.qualified.proto.name]&#91;]</code></pre>
// To override the display text used for the link, this can be used:
// <pre><code>&#91;display text]&#91;fully.qualified.proto.name]</code></pre>
// Text can be excluded from doc using the following notation:
// <pre><code>&#40;-- internal comment --&#41;</code></pre>
// Comments can be made conditional using a visibility label. The text below
// will be rendered only if the `BETA` label is available:
// <pre><code>&#40;--BETA: comment for BETA users --&#41;</code></pre>
// A few directives are available in documentation. Note that
// directives must appear on a single line to be properly
// identified. The `include` directive includes a markdown file from
// an external source:
// <pre><code>&#40;== include path/to/file ==&#41;</code></pre>
// The `resource_for` directive marks a message to be the resource of
// a collection in REST view. If it is not specified, tools attempt
// to infer the resource from the operations in a collection:
// <pre><code>&#40;== resource_for v1.shelves.books ==&#41;</code></pre>
// The directive `suppress_warning` does not directly affect documentation
// and is documented together with service config validation.
type Documentation struct {
// A short summary of what the service does. Can only be provided by
// plain text.
Summary string `protobuf:"bytes,1,opt,name=summary" json:"summary,omitempty"`
// The top level pages for the documentation set.
Pages []*Page `protobuf:"bytes,5,rep,name=pages" json:"pages,omitempty"`
// A list of documentation rules that apply to individual API elements.
//
// **NOTE:** All service configuration rules follow "last one wins" order.
Rules []*DocumentationRule `protobuf:"bytes,3,rep,name=rules" json:"rules,omitempty"`
// The URL to the root of documentation.
DocumentationRootUrl string `protobuf:"bytes,4,opt,name=documentation_root_url,json=documentationRootUrl" json:"documentation_root_url,omitempty"`
// Declares a single overview page. For example:
// <pre><code>documentation:
// summary: ...
// overview: &#40;== include overview.md ==&#41;
// </code></pre>
// This is a shortcut for the following declaration (using pages style):
// <pre><code>documentation:
// summary: ...
// pages:
// - name: Overview
// content: &#40;== include overview.md ==&#41;
// </code></pre>
// Note: you cannot specify both the `overview` field and the `pages` field.
Overview string `protobuf:"bytes,2,opt,name=overview" json:"overview,omitempty"`
}
func (m *Documentation) Reset() { *m = Documentation{} }
func (m *Documentation) String() string { return proto.CompactTextString(m) }
func (*Documentation) ProtoMessage() {}
func (*Documentation) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{0} }
func (m *Documentation) GetSummary() string {
if m != nil {
return m.Summary
}
return ""
}
func (m *Documentation) GetPages() []*Page {
if m != nil {
return m.Pages
}
return nil
}
func (m *Documentation) GetRules() []*DocumentationRule {
if m != nil {
return m.Rules
}
return nil
}
func (m *Documentation) GetDocumentationRootUrl() string {
if m != nil {
return m.DocumentationRootUrl
}
return ""
}
func (m *Documentation) GetOverview() string {
if m != nil {
return m.Overview
}
return ""
}
// A documentation rule provides information about individual API elements.
type DocumentationRule struct {
// The selector is a comma-separated list of patterns. Each pattern is a
// qualified name of the element which may end in "*", indicating a wildcard.
// Wildcards are only allowed at the end and for a whole component of the
// qualified name, i.e. "foo.*" is ok, but not "foo.b*" or "foo.*.bar". To
// specify a default for all applicable elements, the whole pattern "*"
// is used.
Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
// Description of the selected API(s).
Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"`
// Deprecation description of the selected element(s). It can be provided if an
// element is marked as `deprecated`.
DeprecationDescription string `protobuf:"bytes,3,opt,name=deprecation_description,json=deprecationDescription" json:"deprecation_description,omitempty"`
}
func (m *DocumentationRule) Reset() { *m = DocumentationRule{} }
func (m *DocumentationRule) String() string { return proto.CompactTextString(m) }
func (*DocumentationRule) ProtoMessage() {}
func (*DocumentationRule) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{1} }
func (m *DocumentationRule) GetSelector() string {
if m != nil {
return m.Selector
}
return ""
}
func (m *DocumentationRule) GetDescription() string {
if m != nil {
return m.Description
}
return ""
}
func (m *DocumentationRule) GetDeprecationDescription() string {
if m != nil {
return m.DeprecationDescription
}
return ""
}
// Represents a documentation page. A page can contain subpages to represent
// nested documentation set structure.
type Page struct {
// The name of the page. It is used as the identity of the page to
// generate the page URI, the text of the link to this page in navigation,
// etc. The full page name (starting from the root page name down to this
// page, concatenated with `.`) can be used as a reference to the page in
// your documentation. For example:
// <pre><code>pages:
// - name: Tutorial
// content: &#40;== include tutorial.md ==&#41;
// subpages:
// - name: Java
// content: &#40;== include tutorial_java.md ==&#41;
// </code></pre>
// You can reference the `Java` page using Markdown reference link syntax:
// `[Java][Tutorial.Java]`.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// The Markdown content of the page. You can use <code>&#40;== include {path} ==&#41;</code>
// to include content from a Markdown file.
Content string `protobuf:"bytes,2,opt,name=content" json:"content,omitempty"`
// Subpages of this page. The order of subpages specified here will be
// honored in the generated docset.
Subpages []*Page `protobuf:"bytes,3,rep,name=subpages" json:"subpages,omitempty"`
}
func (m *Page) Reset() { *m = Page{} }
func (m *Page) String() string { return proto.CompactTextString(m) }
func (*Page) ProtoMessage() {}
func (*Page) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{2} }
func (m *Page) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Page) GetContent() string {
if m != nil {
return m.Content
}
return ""
}
func (m *Page) GetSubpages() []*Page {
if m != nil {
return m.Subpages
}
return nil
}
func init() {
proto.RegisterType((*Documentation)(nil), "google.api.Documentation")
proto.RegisterType((*DocumentationRule)(nil), "google.api.DocumentationRule")
proto.RegisterType((*Page)(nil), "google.api.Page")
}
func init() { proto.RegisterFile("google/api/documentation.proto", fileDescriptor6) }
var fileDescriptor6 = []byte{
// 356 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xc1, 0x6a, 0xe3, 0x30,
0x14, 0x45, 0x71, 0xec, 0xcc, 0x64, 0x5e, 0x98, 0x61, 0x46, 0x0c, 0x19, 0x33, 0xd0, 0x12, 0xb2,
0x28, 0x59, 0x14, 0x1b, 0x9a, 0x42, 0x17, 0x5d, 0x35, 0xa4, 0x94, 0xee, 0x8c, 0xa1, 0x9b, 0x6e,
0x82, 0xa2, 0xbc, 0x0a, 0x83, 0xad, 0x67, 0x24, 0x39, 0xa5, 0xbf, 0xd0, 0xcf, 0xe8, 0x57, 0xf5,
0x73, 0x8a, 0x65, 0x27, 0xb1, 0x29, 0xdd, 0xf9, 0xfa, 0x1e, 0xe9, 0x3e, 0x5d, 0x09, 0x4e, 0x25,
0x91, 0xcc, 0x31, 0xe6, 0x65, 0x16, 0x6f, 0x49, 0x54, 0x05, 0x2a, 0xcb, 0x6d, 0x46, 0x2a, 0x2a,
0x35, 0x59, 0x62, 0xd0, 0xf8, 0x11, 0x2f, 0xb3, 0xd9, 0xbb, 0x07, 0x3f, 0x57, 0x5d, 0x86, 0x85,
0xf0, 0xdd, 0x54, 0x45, 0xc1, 0xf5, 0x4b, 0xe8, 0x4d, 0xbd, 0xf9, 0x8f, 0x74, 0x2f, 0xd9, 0x19,
0x0c, 0x4b, 0x2e, 0xd1, 0x84, 0xc3, 0xa9, 0x3f, 0x1f, 0x5f, 0xfc, 0x8e, 0x8e, 0xfb, 0x44, 0x09,
0x97, 0x98, 0x36, 0x36, 0x5b, 0xc0, 0x50, 0x57, 0x39, 0x9a, 0xd0, 0x77, 0xdc, 0x49, 0x97, 0xeb,
0x65, 0xa5, 0x55, 0x8e, 0x69, 0xc3, 0xb2, 0x4b, 0x98, 0xf4, 0x66, 0x5d, 0x6b, 0x22, 0xbb, 0xae,
0x74, 0x1e, 0x06, 0x6e, 0x8a, 0xbf, 0x3d, 0x37, 0x25, 0xb2, 0x0f, 0x3a, 0x67, 0xff, 0x61, 0x44,
0x3b, 0xd4, 0xbb, 0x0c, 0x9f, 0xc3, 0x81, 0xe3, 0x0e, 0x7a, 0xf6, 0xea, 0xc1, 0x9f, 0x4f, 0x71,
0xf5, 0x0a, 0x83, 0x39, 0x0a, 0x4b, 0xba, 0x3d, 0xdf, 0x41, 0xb3, 0x29, 0x8c, 0xb7, 0x68, 0x84,
0xce, 0xca, 0x1a, 0x6f, 0x37, 0xec, 0xfe, 0x62, 0x57, 0xf0, 0x6f, 0x8b, 0xa5, 0x46, 0xd1, 0xcc,
0xd8, 0xa5, 0x7d, 0x47, 0x4f, 0x3a, 0xf6, 0xea, 0xe8, 0xce, 0x36, 0x10, 0xd4, 0x15, 0x31, 0x06,
0x81, 0xe2, 0x05, 0xb6, 0xd1, 0xee, 0xbb, 0x6e, 0x5c, 0x90, 0xb2, 0xa8, 0x6c, 0x1b, 0xb9, 0x97,
0xec, 0x1c, 0x46, 0xa6, 0xda, 0x34, 0xa5, 0xfb, 0x5f, 0x94, 0x7e, 0x20, 0x96, 0x16, 0x7e, 0x09,
0x2a, 0x3a, 0xc0, 0x92, 0xf5, 0xce, 0x9f, 0xd4, 0xb7, 0x9f, 0x78, 0x8f, 0xb7, 0x2d, 0x21, 0x29,
0xe7, 0x4a, 0x46, 0xa4, 0x65, 0x2c, 0x51, 0xb9, 0xb7, 0x11, 0x37, 0x16, 0x2f, 0x33, 0xe3, 0x9e,
0x8f, 0xa9, 0xbb, 0x14, 0x28, 0x48, 0x3d, 0x65, 0xf2, 0xba, 0xa7, 0xde, 0x06, 0xc1, 0xdd, 0x4d,
0x72, 0xbf, 0xf9, 0xe6, 0x16, 0x2e, 0x3e, 0x02, 0x00, 0x00, 0xff, 0xff, 0x3e, 0x04, 0x32, 0xbf,
0x76, 0x02, 0x00, 0x00,
}
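// Illustrative sketch, not part of the generated code: builds a small page tree
// like the one in the Documentation comment (an Overview page and a Tutorial
// page with a Java subpage) plus one DocumentationRule, then walks the pages.
func exampleDocumentationConfig() {
	doc := &Documentation{
		Summary: "The Google Calendar API gives access to most calendar features.",
		Pages: []*Page{
			{Name: "Overview", Content: "(== include google/foo/overview.md ==)"},
			{
				Name:    "Tutorial",
				Content: "(== include google/foo/tutorial.md ==)",
				Subpages: []*Page{
					{Name: "Java", Content: "(== include google/foo/tutorial_java.md ==)"},
				},
			},
		},
		Rules: []*DocumentationRule{
			{Selector: "google.calendar.Calendar.Get", Description: "..."},
		},
	}
	for _, p := range doc.GetPages() {
		fmt.Println(p.GetName(), len(p.GetSubpages()))
	}
}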

View File

@ -0,0 +1,130 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/endpoint.proto
package serviceconfig
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// `Endpoint` describes a network endpoint that serves a set of APIs.
// A service may expose any number of endpoints, and all endpoints share the
// same service configuration, such as quota configuration and monitoring
// configuration.
//
// Example service configuration:
//
// name: library-example.googleapis.com
// endpoints:
// # Below entry makes 'google.example.library.v1.Library'
// # API be served from endpoint address library-example.googleapis.com.
// # It also allows HTTP OPTIONS calls to be passed to the backend, for
// # it to decide whether the subsequent cross-origin request is
// # allowed to proceed.
// - name: library-example.googleapis.com
// allow_cors: true
type Endpoint struct {
// The canonical name of this endpoint.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// DEPRECATED: This field is no longer supported. Instead of using aliases,
// please specify multiple [google.api.Endpoint][google.api.Endpoint] entries,
// one for each intended alias.
//
// Additional names that this endpoint will be hosted on.
Aliases []string `protobuf:"bytes,2,rep,name=aliases" json:"aliases,omitempty"`
// The list of APIs served by this endpoint.
Apis []string `protobuf:"bytes,3,rep,name=apis" json:"apis,omitempty"`
// The list of features enabled on this endpoint.
Features []string `protobuf:"bytes,4,rep,name=features" json:"features,omitempty"`
// The specification of an Internet-routable address of the API frontend that will
// handle requests to this [API Endpoint](https://cloud.google.com/apis/design/glossary).
// It should be either a valid IPv4 address or a fully-qualified domain name.
// For example, "8.8.8.8" or "myservice.appspot.com".
Target string `protobuf:"bytes,101,opt,name=target" json:"target,omitempty"`
// Allowing
// [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing), aka
// cross-domain traffic, would allow the backends served from this endpoint to
// receive and respond to HTTP OPTIONS requests. The response will be used by
// the browser to determine whether the subsequent cross-origin request is
// allowed to proceed.
AllowCors bool `protobuf:"varint,5,opt,name=allow_cors,json=allowCors" json:"allow_cors,omitempty"`
}
func (m *Endpoint) Reset() { *m = Endpoint{} }
func (m *Endpoint) String() string { return proto.CompactTextString(m) }
func (*Endpoint) ProtoMessage() {}
func (*Endpoint) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{0} }
func (m *Endpoint) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Endpoint) GetAliases() []string {
if m != nil {
return m.Aliases
}
return nil
}
func (m *Endpoint) GetApis() []string {
if m != nil {
return m.Apis
}
return nil
}
func (m *Endpoint) GetFeatures() []string {
if m != nil {
return m.Features
}
return nil
}
func (m *Endpoint) GetTarget() string {
if m != nil {
return m.Target
}
return ""
}
func (m *Endpoint) GetAllowCors() bool {
if m != nil {
return m.AllowCors
}
return false
}
func init() {
proto.RegisterType((*Endpoint)(nil), "google.api.Endpoint")
}
func init() { proto.RegisterFile("google/api/endpoint.proto", fileDescriptor7) }
var fileDescriptor7 = []byte{
// 253 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0x41, 0x4b, 0xc4, 0x30,
0x10, 0x85, 0xe9, 0x6e, 0x5d, 0xdb, 0x01, 0x3d, 0xe4, 0x20, 0x71, 0x51, 0x28, 0x9e, 0x7a, 0x6a,
0x0f, 0x1e, 0x3d, 0xb9, 0xb2, 0x88, 0xb7, 0xd2, 0xa3, 0x17, 0x19, 0xeb, 0x6c, 0x08, 0x64, 0x33,
0x21, 0x89, 0xfa, 0x73, 0x04, 0x7f, 0xa9, 0x34, 0xed, 0xaa, 0x7b, 0x9b, 0xef, 0xbd, 0x37, 0x61,
0x5e, 0xe0, 0x52, 0x31, 0x2b, 0x43, 0x2d, 0x3a, 0xdd, 0x92, 0x7d, 0x73, 0xac, 0x6d, 0x6c, 0x9c,
0xe7, 0xc8, 0x02, 0x26, 0xab, 0x41, 0xa7, 0xd7, 0x57, 0xff, 0x62, 0x68, 0x2d, 0x47, 0x8c, 0x9a,
0x6d, 0x98, 0x92, 0x37, 0x5f, 0x19, 0x14, 0xdb, 0x79, 0x59, 0x08, 0xc8, 0x2d, 0xee, 0x49, 0x66,
0x55, 0x56, 0x97, 0x7d, 0x9a, 0x85, 0x84, 0x53, 0x34, 0x1a, 0x03, 0x05, 0xb9, 0xa8, 0x96, 0x75,
0xd9, 0x1f, 0x70, 0x4c, 0xa3, 0xd3, 0x41, 0x2e, 0x93, 0x9c, 0x66, 0xb1, 0x86, 0x62, 0x47, 0x18,
0xdf, 0x3d, 0x05, 0x99, 0x27, 0xfd, 0x97, 0xc5, 0x05, 0xac, 0x22, 0x7a, 0x45, 0x51, 0x52, 0x7a,
0x7f, 0x26, 0x71, 0x0d, 0x80, 0xc6, 0xf0, 0xe7, 0xcb, 0xc0, 0x3e, 0xc8, 0x93, 0x2a, 0xab, 0x8b,
0xbe, 0x4c, 0xca, 0x03, 0xfb, 0xb0, 0x61, 0x38, 0x1f, 0x78, 0xdf, 0xfc, 0x35, 0xda, 0x9c, 0x1d,
0x0e, 0xee, 0xc6, 0x0a, 0x5d, 0xf6, 0xbc, 0x9d, 0x4d, 0xc5, 0x06, 0xad, 0x6a, 0xd8, 0xab, 0x56,
0x91, 0x4d, 0x05, 0xdb, 0xc9, 0x1a, 0x8f, 0x4b, 0x3f, 0x10, 0xc8, 0x7f, 0xe8, 0x81, 0x06, 0xb6,
0x3b, 0xad, 0xee, 0x8e, 0xe8, 0x7b, 0x91, 0x3f, 0xde, 0x77, 0x4f, 0xaf, 0xab, 0xb4, 0x78, 0xfb,
0x13, 0x00, 0x00, 0xff, 0xff, 0x34, 0x0e, 0xdd, 0x70, 0x60, 0x01, 0x00, 0x00,
}
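// Illustrative sketch, not part of the generated code: the endpoint from the
// example service configuration above, with CORS enabled so the backend can
// answer HTTP OPTIONS preflight requests itself.
func exampleEndpointConfig() {
	ep := &Endpoint{
		Name:      "library-example.googleapis.com",
		AllowCors: true,
	}
	fmt.Println(ep.GetName(), ep.GetAllowCors())
}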

View File

@ -0,0 +1,98 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/log.proto
package serviceconfig
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_api2 "google.golang.org/genproto/googleapis/api/label"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// A description of a log type. Example in YAML format:
//
// - name: library.googleapis.com/activity_history
// description: The history of borrowing and returning library items.
// display_name: Activity
// labels:
// - key: /customer_id
// description: Identifier of a library customer
type LogDescriptor struct {
// The name of the log. It must be less than 512 characters long and can
// include the following characters: upper- and lower-case alphanumeric
// characters [A-Za-z0-9], and punctuation characters including
// slash, underscore, hyphen, period [/_-.].
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// The set of labels that are available to describe a specific log entry.
// Runtime requests that contain labels not specified here are
// considered invalid.
Labels []*google_api2.LabelDescriptor `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty"`
// A human-readable description of this log. This information appears in
// the documentation and can contain details.
Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
// The human-readable name for this log. This information appears on
// the user interface and should be concise.
DisplayName string `protobuf:"bytes,4,opt,name=display_name,json=displayName" json:"display_name,omitempty"`
}
func (m *LogDescriptor) Reset() { *m = LogDescriptor{} }
func (m *LogDescriptor) String() string { return proto.CompactTextString(m) }
func (*LogDescriptor) ProtoMessage() {}
func (*LogDescriptor) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{0} }
func (m *LogDescriptor) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *LogDescriptor) GetLabels() []*google_api2.LabelDescriptor {
if m != nil {
return m.Labels
}
return nil
}
func (m *LogDescriptor) GetDescription() string {
if m != nil {
return m.Description
}
return ""
}
func (m *LogDescriptor) GetDisplayName() string {
if m != nil {
return m.DisplayName
}
return ""
}
func init() {
proto.RegisterType((*LogDescriptor)(nil), "google.api.LogDescriptor")
}
func init() { proto.RegisterFile("google/api/log.proto", fileDescriptor8) }
var fileDescriptor8 = []byte{
// 238 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x8f, 0xc1, 0x4a, 0xc3, 0x40,
0x10, 0x86, 0x49, 0x1b, 0x8a, 0x6e, 0xd5, 0xc3, 0x22, 0x12, 0xf4, 0x12, 0x3d, 0xf5, 0xb4, 0x01,
0x7b, 0xf4, 0x64, 0x51, 0x44, 0x08, 0x12, 0x7a, 0xf4, 0x22, 0xd3, 0x74, 0x1c, 0x46, 0x36, 0x3b,
0xcb, 0x6e, 0x11, 0x7c, 0x18, 0x2f, 0x3e, 0xa9, 0x74, 0x13, 0x68, 0x7a, 0xdb, 0xfd, 0xe6, 0x9b,
0x7f, 0x66, 0xd4, 0x25, 0x89, 0x90, 0xc5, 0x0a, 0x3c, 0x57, 0x56, 0xc8, 0xf8, 0x20, 0x3b, 0xd1,
0xaa, 0xa7, 0x06, 0x3c, 0x5f, 0x5f, 0x8d, 0x0d, 0xd8, 0xa0, 0xed, 0x9d, 0xbb, 0xdf, 0x4c, 0x9d,
0xd7, 0x42, 0x4f, 0x18, 0xdb, 0xc0, 0x7e, 0x27, 0x41, 0x6b, 0x95, 0x3b, 0xe8, 0xb0, 0xc8, 0xca,
0x6c, 0x71, 0xba, 0x4e, 0x6f, 0xbd, 0x54, 0xb3, 0xd4, 0x14, 0x8b, 0x49, 0x39, 0x5d, 0xcc, 0xef,
0x6f, 0xcc, 0x21, 0xda, 0xd4, 0xfb, 0xca, 0x21, 0x60, 0x3d, 0xa8, 0xba, 0x54, 0xf3, 0xed, 0x40,
0x59, 0x5c, 0x31, 0x4d, 0x79, 0x63, 0xa4, 0x6f, 0xd5, 0xd9, 0x96, 0xa3, 0xb7, 0xf0, 0xf3, 0x91,
0x46, 0xe6, 0x83, 0xd2, 0xb3, 0x37, 0xe8, 0x70, 0xf5, 0xa5, 0x2e, 0x5a, 0xe9, 0x46, 0xe3, 0x56,
0x27, 0xb5, 0x50, 0xb3, 0xdf, 0xbd, 0xc9, 0xde, 0x9f, 0x07, 0x4e, 0x62, 0xc1, 0x91, 0x91, 0x40,
0x15, 0xa1, 0x4b, 0x97, 0x55, 0x7d, 0x09, 0x3c, 0xc7, 0x74, 0x74, 0xc4, 0xf0, 0xcd, 0x2d, 0xb6,
0xe2, 0x3e, 0x99, 0x1e, 0x8e, 0x7e, 0x7f, 0x93, 0xfc, 0xe5, 0xb1, 0x79, 0xdd, 0xcc, 0x52, 0xe3,
0xf2, 0x3f, 0x00, 0x00, 0xff, 0xff, 0x25, 0x6c, 0x32, 0xff, 0x4e, 0x01, 0x00, 0x00,
}
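// Illustrative sketch, not part of the generated code: the activity_history log
// from the YAML example above, described with a single label. The
// LabelDescriptor field names (Key, Description) are assumed from
// google/api/label.proto.
func exampleLogDescriptor() {
	logDesc := &LogDescriptor{
		Name:        "library.googleapis.com/activity_history",
		DisplayName: "Activity",
		Description: "The history of borrowing and returning library items.",
		Labels: []*google_api2.LabelDescriptor{
			{Key: "/customer_id", Description: "Identifier of a library customer"},
		},
	}
	fmt.Println(logDesc.GetName(), len(logDesc.GetLabels()))
}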

View File

@ -0,0 +1,135 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/logging.proto
package serviceconfig
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Logging configuration of the service.
//
// The following example shows how to configure logs to be sent to the
// producer and consumer projects. In the example, the `activity_history`
// log is sent to both the producer and consumer projects, whereas the
// `purchase_history` log is only sent to the producer project.
//
// monitored_resources:
// - type: library.googleapis.com/branch
// labels:
// - key: /city
// description: The city where the library branch is located.
// - key: /name
// description: The name of the branch.
// logs:
// - name: activity_history
// labels:
// - key: /customer_id
// - name: purchase_history
// logging:
// producer_destinations:
// - monitored_resource: library.googleapis.com/branch
// logs:
// - activity_history
// - purchase_history
// consumer_destinations:
// - monitored_resource: library.googleapis.com/branch
// logs:
// - activity_history
type Logging struct {
// Logging configurations for sending logs to the producer project.
// There can be multiple producer destinations; each one must have a
// different monitored resource type. A log can be used in at most
// one producer destination.
ProducerDestinations []*Logging_LoggingDestination `protobuf:"bytes,1,rep,name=producer_destinations,json=producerDestinations" json:"producer_destinations,omitempty"`
// Logging configurations for sending logs to the consumer project.
// There can be multiple consumer destinations; each one must have a
// different monitored resource type. A log can be used in at most
// one consumer destination.
ConsumerDestinations []*Logging_LoggingDestination `protobuf:"bytes,2,rep,name=consumer_destinations,json=consumerDestinations" json:"consumer_destinations,omitempty"`
}
func (m *Logging) Reset() { *m = Logging{} }
func (m *Logging) String() string { return proto.CompactTextString(m) }
func (*Logging) ProtoMessage() {}
func (*Logging) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{0} }
func (m *Logging) GetProducerDestinations() []*Logging_LoggingDestination {
if m != nil {
return m.ProducerDestinations
}
return nil
}
func (m *Logging) GetConsumerDestinations() []*Logging_LoggingDestination {
if m != nil {
return m.ConsumerDestinations
}
return nil
}
// Configuration of a specific logging destination (the producer project
// or the consumer project).
type Logging_LoggingDestination struct {
// The monitored resource type. The type must be defined in the
// [Service.monitored_resources][google.api.Service.monitored_resources] section.
MonitoredResource string `protobuf:"bytes,3,opt,name=monitored_resource,json=monitoredResource" json:"monitored_resource,omitempty"`
// Names of the logs to be sent to this destination. Each name must
// be defined in the [Service.logs][google.api.Service.logs] section. If the log name is
// not a domain scoped name, it will be automatically prefixed with
// the service name followed by "/".
Logs []string `protobuf:"bytes,1,rep,name=logs" json:"logs,omitempty"`
}
func (m *Logging_LoggingDestination) Reset() { *m = Logging_LoggingDestination{} }
func (m *Logging_LoggingDestination) String() string { return proto.CompactTextString(m) }
func (*Logging_LoggingDestination) ProtoMessage() {}
func (*Logging_LoggingDestination) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{0, 0} }
func (m *Logging_LoggingDestination) GetMonitoredResource() string {
if m != nil {
return m.MonitoredResource
}
return ""
}
func (m *Logging_LoggingDestination) GetLogs() []string {
if m != nil {
return m.Logs
}
return nil
}
func init() {
proto.RegisterType((*Logging)(nil), "google.api.Logging")
proto.RegisterType((*Logging_LoggingDestination)(nil), "google.api.Logging.LoggingDestination")
}
func init() { proto.RegisterFile("google/api/logging.proto", fileDescriptor9) }
var fileDescriptor9 = []byte{
// 270 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x90, 0x4f, 0x4b, 0xc4, 0x30,
0x10, 0xc5, 0x69, 0x77, 0x51, 0x36, 0x8a, 0x60, 0x50, 0x28, 0x8b, 0x87, 0xc5, 0x83, 0xec, 0xc5,
0x14, 0xf4, 0xe8, 0xc9, 0x45, 0x11, 0xc1, 0x43, 0xe9, 0x45, 0xd0, 0xc3, 0x12, 0xd3, 0x38, 0x04,
0xda, 0x99, 0x90, 0xa4, 0x7e, 0x1a, 0x4f, 0x7e, 0x52, 0xd9, 0xa6, 0x75, 0xab, 0x9e, 0xf6, 0x94,
0x3f, 0xef, 0xbd, 0x5f, 0x32, 0x8f, 0x65, 0x40, 0x04, 0xb5, 0xce, 0xa5, 0x35, 0x79, 0x4d, 0x00,
0x06, 0x41, 0x58, 0x47, 0x81, 0x38, 0x8b, 0x8a, 0x90, 0xd6, 0xcc, 0xcf, 0x46, 0x2e, 0x89, 0x48,
0x41, 0x06, 0x43, 0xe8, 0xa3, 0xf3, 0xfc, 0x33, 0x65, 0xfb, 0x4f, 0x31, 0xcb, 0x5f, 0xd9, 0xa9,
0x75, 0x54, 0xb5, 0x4a, 0xbb, 0x75, 0xa5, 0x7d, 0x30, 0x18, 0xad, 0x59, 0xb2, 0x98, 0x2c, 0x0f,
0xae, 0x2e, 0xc4, 0x96, 0x2a, 0xfa, 0xcc, 0xb0, 0xde, 0x6d, 0xed, 0xe5, 0xc9, 0x00, 0x19, 0x5d,
0xfa, 0x0d, 0x5c, 0x11, 0xfa, 0xb6, 0xf9, 0x0b, 0x4f, 0x77, 0x83, 0x0f, 0x90, 0x31, 0x7c, 0xfe,
0xcc, 0xf8, 0x7f, 0x2f, 0xbf, 0x64, 0xbc, 0x21, 0x34, 0x81, 0x9c, 0xae, 0xd6, 0x4e, 0x7b, 0x6a,
0x9d, 0xd2, 0xd9, 0x64, 0x91, 0x2c, 0x67, 0xe5, 0xf1, 0x8f, 0x52, 0xf6, 0x02, 0xe7, 0x6c, 0x5a,
0x13, 0xc4, 0x69, 0x67, 0x65, 0xb7, 0x5f, 0x21, 0x3b, 0x52, 0xd4, 0x8c, 0xfe, 0xb6, 0x3a, 0xec,
0x1f, 0x2a, 0x36, 0xf5, 0x15, 0xc9, 0xcb, 0x7d, 0xaf, 0x01, 0xd5, 0x12, 0x41, 0x90, 0x83, 0x1c,
0x34, 0x76, 0xe5, 0xe6, 0x51, 0x92, 0xd6, 0xf8, 0xae, 0x7d, 0xaf, 0xdd, 0x87, 0x51, 0x5a, 0x11,
0xbe, 0x1b, 0xb8, 0xf9, 0x75, 0xfa, 0x4a, 0xa7, 0x0f, 0xb7, 0xc5, 0xe3, 0xdb, 0x5e, 0x17, 0xbc,
0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x73, 0x4f, 0x86, 0x6e, 0xdb, 0x01, 0x00, 0x00,
}
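// Illustrative sketch, not part of the generated code: the producer/consumer
// split from the example in the Logging comment, where activity_history goes to
// both projects and purchase_history only to the producer project.
func exampleLoggingConfig() {
	branch := "library.googleapis.com/branch"
	l := &Logging{
		ProducerDestinations: []*Logging_LoggingDestination{
			{MonitoredResource: branch, Logs: []string{"activity_history", "purchase_history"}},
		},
		ConsumerDestinations: []*Logging_LoggingDestination{
			{MonitoredResource: branch, Logs: []string{"activity_history"}},
		},
	}
	fmt.Println(len(l.GetProducerDestinations()), len(l.GetConsumerDestinations()))
}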

View File

@ -0,0 +1,143 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/monitoring.proto
package serviceconfig
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Monitoring configuration of the service.
//
// The example below shows how to configure monitored resources and metrics
// for monitoring. In the example, a monitored resource and two metrics are
// defined. The `library.googleapis.com/book/returned_count` metric is sent
// to both producer and consumer projects, whereas the
// `library.googleapis.com/book/overdue_count` metric is only sent to the
// consumer project.
//
// monitored_resources:
// - type: library.googleapis.com/branch
// labels:
// - key: /city
// description: The city where the library branch is located.
// - key: /name
// description: The name of the branch.
// metrics:
// - name: library.googleapis.com/book/returned_count
// metric_kind: DELTA
// value_type: INT64
// labels:
// - key: /customer_id
// - name: library.googleapis.com/book/overdue_count
// metric_kind: GAUGE
// value_type: INT64
// labels:
// - key: /customer_id
// monitoring:
// producer_destinations:
// - monitored_resource: library.googleapis.com/branch
// metrics:
// - library.googleapis.com/book/returned_count
// consumer_destinations:
// - monitored_resource: library.googleapis.com/branch
// metrics:
// - library.googleapis.com/book/returned_count
// - library.googleapis.com/book/overdue_count
type Monitoring struct {
// Monitoring configurations for sending metrics to the producer project.
// There can be multiple producer destinations; each one must have a
// different monitored resource type. A metric can be used in at most
// one producer destination.
ProducerDestinations []*Monitoring_MonitoringDestination `protobuf:"bytes,1,rep,name=producer_destinations,json=producerDestinations" json:"producer_destinations,omitempty"`
// Monitoring configurations for sending metrics to the consumer project.
// There can be multiple consumer destinations; each one must have a
// different monitored resource type. A metric can be used in at most
// one consumer destination.
ConsumerDestinations []*Monitoring_MonitoringDestination `protobuf:"bytes,2,rep,name=consumer_destinations,json=consumerDestinations" json:"consumer_destinations,omitempty"`
}
func (m *Monitoring) Reset() { *m = Monitoring{} }
func (m *Monitoring) String() string { return proto.CompactTextString(m) }
func (*Monitoring) ProtoMessage() {}
func (*Monitoring) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{0} }
func (m *Monitoring) GetProducerDestinations() []*Monitoring_MonitoringDestination {
if m != nil {
return m.ProducerDestinations
}
return nil
}
func (m *Monitoring) GetConsumerDestinations() []*Monitoring_MonitoringDestination {
if m != nil {
return m.ConsumerDestinations
}
return nil
}
// Configuration of a specific monitoring destination (the producer project
// or the consumer project).
type Monitoring_MonitoringDestination struct {
// The monitored resource type. The type must be defined in
// [Service.monitored_resources][google.api.Service.monitored_resources] section.
MonitoredResource string `protobuf:"bytes,1,opt,name=monitored_resource,json=monitoredResource" json:"monitored_resource,omitempty"`
// Names of the metrics to report to this monitoring destination.
// Each name must be defined in [Service.metrics][google.api.Service.metrics] section.
Metrics []string `protobuf:"bytes,2,rep,name=metrics" json:"metrics,omitempty"`
}
func (m *Monitoring_MonitoringDestination) Reset() { *m = Monitoring_MonitoringDestination{} }
func (m *Monitoring_MonitoringDestination) String() string { return proto.CompactTextString(m) }
func (*Monitoring_MonitoringDestination) ProtoMessage() {}
func (*Monitoring_MonitoringDestination) Descriptor() ([]byte, []int) {
return fileDescriptor10, []int{0, 0}
}
func (m *Monitoring_MonitoringDestination) GetMonitoredResource() string {
if m != nil {
return m.MonitoredResource
}
return ""
}
func (m *Monitoring_MonitoringDestination) GetMetrics() []string {
if m != nil {
return m.Metrics
}
return nil
}
func init() {
proto.RegisterType((*Monitoring)(nil), "google.api.Monitoring")
proto.RegisterType((*Monitoring_MonitoringDestination)(nil), "google.api.Monitoring.MonitoringDestination")
}
func init() { proto.RegisterFile("google/api/monitoring.proto", fileDescriptor10) }
var fileDescriptor10 = []byte{
// 271 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x51, 0xcd, 0x4a, 0xc4, 0x30,
0x10, 0xa6, 0x55, 0x94, 0x8d, 0xa0, 0x58, 0x5c, 0x28, 0xab, 0x87, 0xc5, 0xd3, 0x1e, 0xb4, 0x05,
0x3d, 0x7a, 0x72, 0x51, 0xc4, 0x83, 0x50, 0x7a, 0xf4, 0xb2, 0xc6, 0x74, 0x0c, 0x03, 0xdb, 0x99,
0x9a, 0xa4, 0x3e, 0x90, 0xcf, 0xe0, 0x03, 0xca, 0x36, 0xed, 0x36, 0x8a, 0x27, 0x6f, 0x99, 0x7c,
0x7f, 0xc3, 0x37, 0xe2, 0x54, 0x33, 0xeb, 0x35, 0xe4, 0xb2, 0xc1, 0xbc, 0x66, 0x42, 0xc7, 0x06,
0x49, 0x67, 0x8d, 0x61, 0xc7, 0x89, 0xf0, 0x60, 0x26, 0x1b, 0x9c, 0x9d, 0x05, 0x44, 0x49, 0xc4,
0x4e, 0x3a, 0x64, 0xb2, 0x9e, 0x79, 0xfe, 0x15, 0x0b, 0xf1, 0xb4, 0x95, 0x27, 0x52, 0x4c, 0x1b,
0xc3, 0x55, 0xab, 0xc0, 0xac, 0x2a, 0xb0, 0x0e, 0xc9, 0xb3, 0xd3, 0x68, 0xbe, 0xb3, 0x38, 0xb8,
0xba, 0xc8, 0x46, 0xe3, 0x6c, 0x94, 0x05, 0xcf, 0xbb, 0x51, 0x54, 0x9e, 0x0c, 0x56, 0xc1, 0xa7,
0xdd, 0x44, 0x28, 0x26, 0xdb, 0xd6, 0xbf, 0x23, 0xe2, 0xff, 0x44, 0x0c, 0x56, 0x61, 0xc4, 0xec,
0x45, 0x4c, 0xff, 0xa4, 0x27, 0x97, 0x22, 0xe9, 0xbb, 0x82, 0x6a, 0x65, 0xc0, 0x72, 0x6b, 0x14,
0xa4, 0xd1, 0x3c, 0x5a, 0x4c, 0xca, 0xe3, 0x2d, 0x52, 0xf6, 0x40, 0x92, 0x8a, 0xfd, 0x1a, 0x9c,
0x41, 0xe5, 0x97, 0x9b, 0x94, 0xc3, 0xb8, 0x7c, 0x17, 0x87, 0x8a, 0xeb, 0x60, 0xd5, 0xe5, 0xd1,
0x98, 0x58, 0x6c, 0x9a, 0x2d, 0xa2, 0xe7, 0xfb, 0x1e, 0xd6, 0xbc, 0x96, 0xa4, 0x33, 0x36, 0x3a,
0xd7, 0x40, 0x5d, 0xef, 0xb9, 0x87, 0x64, 0x83, 0xb6, 0x3b, 0x8c, 0x05, 0xf3, 0x81, 0x0a, 0x14,
0xd3, 0x1b, 0xea, 0x9b, 0x1f, 0xd3, 0x67, 0xbc, 0xfb, 0x70, 0x5b, 0x3c, 0xbe, 0xee, 0x75, 0xc2,
0xeb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x51, 0x35, 0xf3, 0xe2, 0xf9, 0x01, 0x00, 0x00,
}
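// Illustrative sketch, not part of the generated code: mirrors the example in
// the Monitoring comment, sending returned_count to both projects and
// overdue_count only to the consumer project.
func exampleMonitoringConfig() {
	branch := "library.googleapis.com/branch"
	m := &Monitoring{
		ProducerDestinations: []*Monitoring_MonitoringDestination{
			{MonitoredResource: branch, Metrics: []string{"library.googleapis.com/book/returned_count"}},
		},
		ConsumerDestinations: []*Monitoring_MonitoringDestination{
			{
				MonitoredResource: branch,
				Metrics: []string{
					"library.googleapis.com/book/returned_count",
					"library.googleapis.com/book/overdue_count",
				},
			},
		},
	}
	fmt.Println(proto.CompactTextString(m))
}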

View File

@ -0,0 +1,391 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/quota.proto
package serviceconfig
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Quota configuration helps to achieve fairness and budgeting in service
// usage.
//
// The quota configuration works this way:
// - The service configuration defines a set of metrics.
// - For API calls, the quota.metric_rules maps methods to metrics with
// corresponding costs.
// - The quota.limits defines limits on the metrics, which will be used for
// quota checks at runtime.
//
// An example quota configuration in yaml format:
//
// quota:
// limits:
//
// - name: apiWriteQpsPerProject
// metric: library.googleapis.com/write_calls
// unit: "1/min/{project}" # rate limit for consumer projects
// values:
// STANDARD: 10000
//
// # The metric rules bind all methods to the read_calls metric,
// # except for the UpdateBook and DeleteBook methods. These two methods
// # are mapped to the write_calls metric, with the UpdateBook method
// # consuming at twice the rate of the DeleteBook method.
// metric_rules:
// - selector: "*"
// metric_costs:
// library.googleapis.com/read_calls: 1
// - selector: google.example.library.v1.LibraryService.UpdateBook
// metric_costs:
// library.googleapis.com/write_calls: 2
// - selector: google.example.library.v1.LibraryService.DeleteBook
// metric_costs:
// library.googleapis.com/write_calls: 1
//
// Corresponding Metric definition:
//
// metrics:
// - name: library.googleapis.com/read_calls
// display_name: Read requests
// metric_kind: DELTA
// value_type: INT64
//
// - name: library.googleapis.com/write_calls
// display_name: Write requests
// metric_kind: DELTA
// value_type: INT64
//
type Quota struct {
// List of `QuotaLimit` definitions for the service.
//
// Used by metric-based quotas only.
Limits []*QuotaLimit `protobuf:"bytes,3,rep,name=limits" json:"limits,omitempty"`
// List of `MetricRule` definitions, each one mapping a selected method to one
// or more metrics.
//
// Used by metric-based quotas only.
MetricRules []*MetricRule `protobuf:"bytes,4,rep,name=metric_rules,json=metricRules" json:"metric_rules,omitempty"`
}
func (m *Quota) Reset() { *m = Quota{} }
func (m *Quota) String() string { return proto.CompactTextString(m) }
func (*Quota) ProtoMessage() {}
func (*Quota) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{0} }
func (m *Quota) GetLimits() []*QuotaLimit {
if m != nil {
return m.Limits
}
return nil
}
func (m *Quota) GetMetricRules() []*MetricRule {
if m != nil {
return m.MetricRules
}
return nil
}
// Bind API methods to metrics. Binding a method to a metric causes that
// metric's configured quota, billing, and monitoring behaviors to apply to the
// method call.
//
// Used by metric-based quotas only.
type MetricRule struct {
// Selects the methods to which this rule applies.
//
// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
// Metrics to update when the selected methods are called, and the associated
// cost applied to each metric.
//
// The key of the map is the metric name, and the value is the amount by
// which that metric is increased; the quota limits are defined against these metrics.
// The value must not be negative.
MetricCosts map[string]int64 `protobuf:"bytes,2,rep,name=metric_costs,json=metricCosts" json:"metric_costs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
}
func (m *MetricRule) Reset() { *m = MetricRule{} }
func (m *MetricRule) String() string { return proto.CompactTextString(m) }
func (*MetricRule) ProtoMessage() {}
func (*MetricRule) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{1} }
func (m *MetricRule) GetSelector() string {
if m != nil {
return m.Selector
}
return ""
}
func (m *MetricRule) GetMetricCosts() map[string]int64 {
if m != nil {
return m.MetricCosts
}
return nil
}
// `QuotaLimit` defines a specific limit that applies over a specified duration
// for a limit type. There can be at most one limit for a duration and limit
// type combination defined within a `QuotaGroup`.
type QuotaLimit struct {
// Name of the quota limit. The name is used to refer to the limit when
// overriding the default limit on per-consumer basis.
//
// For group-based quota limits, the name must be unique within the quota
// group. If a name is not provided, it will be generated from the limit_by
// and duration fields.
//
// For metric-based quota limits, the name must be provided, and it must be
// unique within the service. The name can only include alphanumeric
// characters as well as '-'.
//
// The maximum length of the limit name is 64 characters.
//
// The name of a limit is used as a unique identifier for this limit.
// Therefore, once a limit has been put into use, its name should be
// immutable. You can use the display_name field to provide a user-friendly
// name for the limit. The display name can be evolved over time without
// affecting the identity of the limit.
Name string `protobuf:"bytes,6,opt,name=name" json:"name,omitempty"`
// Optional. User-visible, extended description for this quota limit.
// Should be used only when more context is needed to understand this limit
// than provided by the limit's display name (see: `display_name`).
Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"`
// Default number of tokens that can be consumed during the specified
// duration. This is the number of tokens assigned when a client
// application developer activates the service for his/her project.
//
// Specifying a value of 0 will block all requests. This can be used if you
// are provisioning quota to selected consumers and blocking others.
// Similarly, a value of -1 will indicate an unlimited quota. No other
// negative values are allowed.
//
// Used by group-based quotas only.
DefaultLimit int64 `protobuf:"varint,3,opt,name=default_limit,json=defaultLimit" json:"default_limit,omitempty"`
// Maximum number of tokens that can be consumed during the specified
// duration. Client application developers can override the default limit up
// to this maximum. If specified, this value cannot be set to a value less
// than the default limit. If not specified, it is set to the default limit.
//
// To allow clients to apply overrides with no upper bound, set this to -1,
// indicating unlimited maximum quota.
//
// Used by group-based quotas only.
MaxLimit int64 `protobuf:"varint,4,opt,name=max_limit,json=maxLimit" json:"max_limit,omitempty"`
// Free tier value displayed in the Developers Console for this limit.
// The free tier is the number of tokens that will be subtracted from the
// billed amount when billing is enabled.
// This field can only be set on a limit with duration "1d", in a billable
// group; it is invalid on any other limit. If this field is not set, it
// defaults to 0, indicating that there is no free tier for this service.
//
// Used by group-based quotas only.
FreeTier int64 `protobuf:"varint,7,opt,name=free_tier,json=freeTier" json:"free_tier,omitempty"`
// Duration of this limit in textual notation. Example: "100s", "24h", "1d".
// For durations longer than a day, only multiples of days are supported. We
// support only "100s" and "1d" for now. Additional support will be added in
// the future. "0" indicates indefinite duration.
//
// Used by group-based quotas only.
Duration string `protobuf:"bytes,5,opt,name=duration" json:"duration,omitempty"`
// The name of the metric this quota limit applies to. The quota limits with
// the same metric will be checked together during runtime. The metric must be
// defined within the service config.
//
// Used by metric-based quotas only.
Metric string `protobuf:"bytes,8,opt,name=metric" json:"metric,omitempty"`
// Specify the unit of the quota limit. It uses the same syntax as
// [Metric.unit][]. The supported unit kinds are determined by the quota
// backend system.
//
// The [Google Service Control](https://cloud.google.com/service-control)
// supports the following unit components:
// * One of the time intervals:
// * "/min" for quota every minute.
// * "/d" for quota every 24 hours, starting 00:00 US Pacific Time.
// * Otherwise the quota won't be reset by time, such as storage limit.
// * One and only one of the granted containers:
// * "/{organization}" quota for an organization.
// * "/{project}" quota for a project.
// * "/{folder}" quota for a folder.
// * "/{resource}" quota for a universal resource.
// * Zero or more quota segmentation dimensions. Not all combinations are valid.
// * "/{region}" quota for every region. Not to be used with time intervals.
// * Otherwise the resources granted on the target are not segmented.
// * "/{zone}" quota for every zone. Not to be used with time intervals.
// * Otherwise the resources granted on the target are not segmented.
// * "/{resource}" quota for a resource associated with a project or org.
//
// Here are some examples:
// * "1/min/{project}" for quota per minute per project.
// * "1/min/{user}" for quota per minute per user.
// * "1/min/{organization}" for quota per minute per organization.
//
// Note: the order of unit components is insignificant.
// The "1" at the beginning is required to follow the metric unit syntax.
//
// Used by metric-based quotas only.
Unit string `protobuf:"bytes,9,opt,name=unit" json:"unit,omitempty"`
// Tiered limit values. Also allows for regional or zone overrides for these
// values if "/{region}" or "/{zone}" is specified in the unit field.
//
// Currently supported tiers from low to high:
// VERY_LOW, LOW, STANDARD, HIGH, VERY_HIGH
//
// To apply different limit values for users according to their tiers, specify
// the values for the tiers you want to differentiate. For example:
// {LOW:100, STANDARD:500, HIGH:1000, VERY_HIGH:5000}
//
// The limit value for each tier is optional except for the tier STANDARD.
// The limit value for an unspecified tier falls to the value of its next
// tier towards tier STANDARD. For the above example, the limit value for tier
// STANDARD is 500.
//
// To apply the same limit value for all users, just specify limit value for
// tier STANDARD. For example: {STANDARD:500}.
//
// To apply a regional override for a tier, add a map entry with key
// "<TIER>/<region>", where <region> is a region name. Similarly, for a zone
// override, add a map entry with key "<TIER>/{zone}".
// Further, a wildcard can be used at the end of a zone name in order to
// specify zone level overrides. For example:
// LOW: 10, STANDARD: 50, HIGH: 100,
// LOW/us-central1: 20, STANDARD/us-central1: 60, HIGH/us-central1: 200,
// LOW/us-central1-*: 10, STANDARD/us-central1-*: 20, HIGH/us-central1-*: 80
//
// The regional overrides tier set for each region must be the same as
// the tier set for default limit values. Same rule applies for zone overrides
// tier as well.
//
// Used by metric-based quotas only.
Values map[string]int64 `protobuf:"bytes,10,rep,name=values" json:"values,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
// User-visible display name for this limit.
// Optional. If not set, the UI will provide a default display name based on
// the quota configuration. This field can be used to override the default
// display name generated from the configuration.
DisplayName string `protobuf:"bytes,12,opt,name=display_name,json=displayName" json:"display_name,omitempty"`
}
func (m *QuotaLimit) Reset() { *m = QuotaLimit{} }
func (m *QuotaLimit) String() string { return proto.CompactTextString(m) }
func (*QuotaLimit) ProtoMessage() {}
func (*QuotaLimit) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{2} }
func (m *QuotaLimit) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *QuotaLimit) GetDescription() string {
if m != nil {
return m.Description
}
return ""
}
func (m *QuotaLimit) GetDefaultLimit() int64 {
if m != nil {
return m.DefaultLimit
}
return 0
}
func (m *QuotaLimit) GetMaxLimit() int64 {
if m != nil {
return m.MaxLimit
}
return 0
}
func (m *QuotaLimit) GetFreeTier() int64 {
if m != nil {
return m.FreeTier
}
return 0
}
func (m *QuotaLimit) GetDuration() string {
if m != nil {
return m.Duration
}
return ""
}
func (m *QuotaLimit) GetMetric() string {
if m != nil {
return m.Metric
}
return ""
}
func (m *QuotaLimit) GetUnit() string {
if m != nil {
return m.Unit
}
return ""
}
func (m *QuotaLimit) GetValues() map[string]int64 {
if m != nil {
return m.Values
}
return nil
}
func (m *QuotaLimit) GetDisplayName() string {
if m != nil {
return m.DisplayName
}
return ""
}
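// Illustrative sketch, not part of the generated code: a metric-based
// QuotaLimit with tiered values, following the examples in the comments
// above. The names and numbers are hypothetical.
var exampleQuotaLimit = &QuotaLimit{
	Name:        "apiReadQpsPerProject",
	Description: "Read requests per minute per project",
	Metric:      "library.googleapis.com/read_calls",
	Unit:        "1/min/{project}",
	Values:      map[string]int64{"LOW": 100, "STANDARD": 500, "HIGH": 1000},
	DisplayName: "Read requests",
}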
func init() {
proto.RegisterType((*Quota)(nil), "google.api.Quota")
proto.RegisterType((*MetricRule)(nil), "google.api.MetricRule")
proto.RegisterType((*QuotaLimit)(nil), "google.api.QuotaLimit")
}
func init() { proto.RegisterFile("google/api/quota.proto", fileDescriptor11) }
var fileDescriptor11 = []byte{
// 466 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xc1, 0x8e, 0xd3, 0x30,
0x10, 0x55, 0x9a, 0xb6, 0xb4, 0xd3, 0x82, 0x56, 0x16, 0xaa, 0xac, 0xc2, 0xa1, 0x94, 0x03, 0x3d,
0xa5, 0x12, 0x5c, 0xd8, 0x45, 0x42, 0x62, 0xd1, 0x0a, 0x81, 0x00, 0x95, 0x08, 0x71, 0xe0, 0x52,
0x99, 0x74, 0x1a, 0x59, 0x38, 0x71, 0xb0, 0x9d, 0xd5, 0xf6, 0xcc, 0x9f, 0xf0, 0x0d, 0x7c, 0x20,
0xf2, 0xd8, 0xdb, 0x16, 0xd8, 0xcb, 0xde, 0x66, 0xe6, 0xbd, 0xe7, 0x17, 0x3f, 0x4f, 0x60, 0x52,
0x6a, 0x5d, 0x2a, 0x5c, 0x8a, 0x46, 0x2e, 0x7f, 0xb4, 0xda, 0x89, 0xac, 0x31, 0xda, 0x69, 0x06,
0x61, 0x9e, 0x89, 0x46, 0x4e, 0x1f, 0x1e, 0x71, 0x44, 0x5d, 0x6b, 0x27, 0x9c, 0xd4, 0xb5, 0x0d,
0xcc, 0xb9, 0x81, 0xde, 0x27, 0x2f, 0x64, 0x19, 0xf4, 0x95, 0xac, 0xa4, 0xb3, 0x3c, 0x9d, 0xa5,
0x8b, 0xd1, 0xd3, 0x49, 0x76, 0x38, 0x23, 0x23, 0xca, 0x7b, 0x0f, 0xe7, 0x91, 0xc5, 0x4e, 0x61,
0x5c, 0xa1, 0x33, 0xb2, 0x58, 0x9b, 0x56, 0xa1, 0xe5, 0xdd, 0xff, 0x55, 0x1f, 0x08, 0xcf, 0x5b,
0x85, 0xf9, 0xa8, 0xda, 0xd7, 0x76, 0xfe, 0x3b, 0x01, 0x38, 0x60, 0x6c, 0x0a, 0x03, 0x8b, 0x0a,
0x0b, 0xa7, 0x0d, 0x4f, 0x66, 0xc9, 0x62, 0x98, 0xef, 0x7b, 0xf6, 0x6e, 0xef, 0x52, 0x68, 0xeb,
0x2c, 0xef, 0x90, 0xcb, 0x93, 0x9b, 0x5d, 0x62, 0xf9, 0xda, 0x33, 0x2f, 0x6a, 0x67, 0x76, 0xd7,
0xb6, 0x34, 0x99, 0xbe, 0x84, 0x93, 0x7f, 0x09, 0xec, 0x04, 0xd2, 0xef, 0xb8, 0x8b, 0xb6, 0xbe,
0x64, 0xf7, 0xa1, 0x77, 0x29, 0x54, 0x8b, 0xbc, 0x33, 0x4b, 0x16, 0x69, 0x1e, 0x9a, 0xb3, 0xce,
0xf3, 0x64, 0xfe, 0x33, 0x05, 0x38, 0x04, 0xc1, 0x18, 0x74, 0x6b, 0x51, 0x21, 0xef, 0x93, 0x96,
0x6a, 0x36, 0x83, 0xd1, 0x06, 0x6d, 0x61, 0x64, 0xe3, 0x33, 0xa6, 0x23, 0x86, 0xf9, 0xf1, 0x88,
0x3d, 0x86, 0xbb, 0x1b, 0xdc, 0x8a, 0x56, 0xb9, 0x35, 0x05, 0xc9, 0x53, 0xb2, 0x19, 0xc7, 0x61,
0x38, 0xfa, 0x01, 0x0c, 0x2b, 0x71, 0x15, 0x09, 0x5d, 0x22, 0x0c, 0x2a, 0x71, 0xb5, 0x07, 0xb7,
0x06, 0x71, 0xed, 0x24, 0x1a, 0x7e, 0x27, 0x80, 0x7e, 0xf0, 0x59, 0xa2, 0xf1, 0x59, 0x6e, 0x5a,
0x43, 0x2f, 0xcc, 0x7b, 0x21, 0xcb, 0xeb, 0x9e, 0x4d, 0xa0, 0x1f, 0xe2, 0xe0, 0x03, 0x42, 0x62,
0xe7, 0x2f, 0xd2, 0xd6, 0xd2, 0xf1, 0x61, 0xb8, 0x88, 0xaf, 0xd9, 0x19, 0xf4, 0xe9, 0xe2, 0x96,
0x03, 0x25, 0x3e, 0xbf, 0x79, 0x1b, 0xb2, 0x2f, 0x44, 0x0a, 0x61, 0x47, 0x05, 0x7b, 0x04, 0xe3,
0x8d, 0xb4, 0x8d, 0x12, 0xbb, 0x35, 0x05, 0x34, 0x8e, 0x29, 0x84, 0xd9, 0x47, 0x51, 0xe1, 0xf4,
0x14, 0x46, 0x47, 0xca, 0xdb, 0xbc, 0xc2, 0xb9, 0x82, 0x7b, 0x85, 0xae, 0x8e, 0x3e, 0xe7, 0x3c,
0x3c, 0xca, 0xca, 0xaf, 0xf3, 0x2a, 0xf9, 0x7a, 0x11, 0x91, 0x52, 0x2b, 0x51, 0x97, 0x99, 0x36,
0xe5, 0xb2, 0xc4, 0x9a, 0x96, 0x7d, 0x19, 0x20, 0xd1, 0x48, 0x4b, 0x7f, 0x83, 0x45, 0x73, 0x29,
0x0b, 0x2c, 0x74, 0xbd, 0x95, 0xe5, 0x8b, 0xbf, 0xba, 0x5f, 0x9d, 0xee, 0x9b, 0x57, 0xab, 0xb7,
0xdf, 0xfa, 0x24, 0x7c, 0xf6, 0x27, 0x00, 0x00, 0xff, 0xff, 0x90, 0x7e, 0xf5, 0xab, 0x69, 0x03,
0x00, 0x00,
}


@ -0,0 +1,368 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/service.proto
package serviceconfig
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import google_api5 "google.golang.org/genproto/googleapis/api"
import google_api "google.golang.org/genproto/googleapis/api/annotations"
import _ "google.golang.org/genproto/googleapis/api/label"
import google_api3 "google.golang.org/genproto/googleapis/api/metric"
import google_api6 "google.golang.org/genproto/googleapis/api/monitoredres"
import _ "github.com/golang/protobuf/ptypes/any"
import google_protobuf4 "google.golang.org/genproto/protobuf/api"
import google_protobuf3 "google.golang.org/genproto/protobuf/ptype"
import google_protobuf5 "github.com/golang/protobuf/ptypes/wrappers"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// `Service` is the root object of Google service configuration schema. It
// describes basic information about a service, such as the name and the
// title, and delegates other aspects to sub-sections. Each sub-section is
// either a proto message or a repeated proto message that configures a
// specific aspect, such as auth. See each proto message definition for details.
//
// Example:
//
// type: google.api.Service
// config_version: 3
// name: calendar.googleapis.com
// title: Google Calendar API
// apis:
// - name: google.calendar.v3.Calendar
// authentication:
// providers:
// - id: google_calendar_auth
// jwks_uri: https://www.googleapis.com/oauth2/v1/certs
// issuer: https://securetoken.google.com
// rules:
// - selector: "*"
// requirements:
// provider_id: google_calendar_auth
type Service struct {
// The version of the service configuration. The config version may
// influence interpretation of the configuration, for example, to
// determine defaults. This is documented together with applicable
// options. The current default for the config version itself is `3`.
ConfigVersion *google_protobuf5.UInt32Value `protobuf:"bytes,20,opt,name=config_version,json=configVersion" json:"config_version,omitempty"`
// The DNS address at which this service is available,
// e.g. `calendar.googleapis.com`.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// A unique ID for a specific instance of this message, typically assigned
// by the client for tracking purposes. If empty, the server may choose to
// generate one instead.
Id string `protobuf:"bytes,33,opt,name=id" json:"id,omitempty"`
// The product title associated with this service.
Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"`
// The id of the Google developer project that owns the service.
// Members of this project can manage the service configuration,
// manage consumption of the service, etc.
ProducerProjectId string `protobuf:"bytes,22,opt,name=producer_project_id,json=producerProjectId" json:"producer_project_id,omitempty"`
// A list of API interfaces exported by this service. Only the `name` field
// of the [google.protobuf.Api][google.protobuf.Api] needs to be provided by the configuration
// author, as the remaining fields will be derived from the IDL during the
// normalization process. It is an error to specify an API interface here
// which cannot be resolved against the associated IDL files.
Apis []*google_protobuf4.Api `protobuf:"bytes,3,rep,name=apis" json:"apis,omitempty"`
// A list of all proto message types included in this API service.
// Types referenced directly or indirectly by the `apis` are
// automatically included. Messages which are not referenced but
// shall be included, such as types used by the `google.protobuf.Any` type,
// should be listed here by name. Example:
//
// types:
// - name: google.protobuf.Int32
Types []*google_protobuf3.Type `protobuf:"bytes,4,rep,name=types" json:"types,omitempty"`
// A list of all enum types included in this API service. Enums
// referenced directly or indirectly by the `apis` are automatically
// included. Enums which are not referenced but shall be included
// should be listed here by name. Example:
//
// enums:
// - name: google.someapi.v1.SomeEnum
Enums []*google_protobuf3.Enum `protobuf:"bytes,5,rep,name=enums" json:"enums,omitempty"`
// Additional API documentation.
Documentation *Documentation `protobuf:"bytes,6,opt,name=documentation" json:"documentation,omitempty"`
// API backend configuration.
Backend *Backend `protobuf:"bytes,8,opt,name=backend" json:"backend,omitempty"`
// HTTP configuration.
Http *google_api.Http `protobuf:"bytes,9,opt,name=http" json:"http,omitempty"`
// Quota configuration.
Quota *Quota `protobuf:"bytes,10,opt,name=quota" json:"quota,omitempty"`
// Auth configuration.
Authentication *Authentication `protobuf:"bytes,11,opt,name=authentication" json:"authentication,omitempty"`
// Context configuration.
Context *Context `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"`
// Configuration controlling usage of this service.
Usage *Usage `protobuf:"bytes,15,opt,name=usage" json:"usage,omitempty"`
// Configuration for network endpoints. If this is empty, then an endpoint
// with the same name as the service is automatically generated to service all
// defined APIs.
Endpoints []*Endpoint `protobuf:"bytes,18,rep,name=endpoints" json:"endpoints,omitempty"`
// Configuration for the service control plane.
Control *Control `protobuf:"bytes,21,opt,name=control" json:"control,omitempty"`
// Defines the logs used by this service.
Logs []*LogDescriptor `protobuf:"bytes,23,rep,name=logs" json:"logs,omitempty"`
// Defines the metrics used by this service.
Metrics []*google_api3.MetricDescriptor `protobuf:"bytes,24,rep,name=metrics" json:"metrics,omitempty"`
// Defines the monitored resources used by this service. This is required
// by the [Service.monitoring][google.api.Service.monitoring] and [Service.logging][google.api.Service.logging] configurations.
MonitoredResources []*google_api6.MonitoredResourceDescriptor `protobuf:"bytes,25,rep,name=monitored_resources,json=monitoredResources" json:"monitored_resources,omitempty"`
// Logging configuration.
Logging *Logging `protobuf:"bytes,27,opt,name=logging" json:"logging,omitempty"`
// Monitoring configuration.
Monitoring *Monitoring `protobuf:"bytes,28,opt,name=monitoring" json:"monitoring,omitempty"`
// System parameter configuration.
SystemParameters *SystemParameters `protobuf:"bytes,29,opt,name=system_parameters,json=systemParameters" json:"system_parameters,omitempty"`
// Output only. The source information for this configuration if available.
SourceInfo *SourceInfo `protobuf:"bytes,37,opt,name=source_info,json=sourceInfo" json:"source_info,omitempty"`
// Experimental configuration.
Experimental *google_api5.Experimental `protobuf:"bytes,101,opt,name=experimental" json:"experimental,omitempty"`
}
func (m *Service) Reset() { *m = Service{} }
func (m *Service) String() string { return proto.CompactTextString(m) }
func (*Service) ProtoMessage() {}
func (*Service) Descriptor() ([]byte, []int) { return fileDescriptor12, []int{0} }
func (m *Service) GetConfigVersion() *google_protobuf5.UInt32Value {
if m != nil {
return m.ConfigVersion
}
return nil
}
func (m *Service) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Service) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *Service) GetTitle() string {
if m != nil {
return m.Title
}
return ""
}
func (m *Service) GetProducerProjectId() string {
if m != nil {
return m.ProducerProjectId
}
return ""
}
func (m *Service) GetApis() []*google_protobuf4.Api {
if m != nil {
return m.Apis
}
return nil
}
func (m *Service) GetTypes() []*google_protobuf3.Type {
if m != nil {
return m.Types
}
return nil
}
func (m *Service) GetEnums() []*google_protobuf3.Enum {
if m != nil {
return m.Enums
}
return nil
}
func (m *Service) GetDocumentation() *Documentation {
if m != nil {
return m.Documentation
}
return nil
}
func (m *Service) GetBackend() *Backend {
if m != nil {
return m.Backend
}
return nil
}
func (m *Service) GetHttp() *google_api.Http {
if m != nil {
return m.Http
}
return nil
}
func (m *Service) GetQuota() *Quota {
if m != nil {
return m.Quota
}
return nil
}
func (m *Service) GetAuthentication() *Authentication {
if m != nil {
return m.Authentication
}
return nil
}
func (m *Service) GetContext() *Context {
if m != nil {
return m.Context
}
return nil
}
func (m *Service) GetUsage() *Usage {
if m != nil {
return m.Usage
}
return nil
}
func (m *Service) GetEndpoints() []*Endpoint {
if m != nil {
return m.Endpoints
}
return nil
}
func (m *Service) GetControl() *Control {
if m != nil {
return m.Control
}
return nil
}
func (m *Service) GetLogs() []*LogDescriptor {
if m != nil {
return m.Logs
}
return nil
}
func (m *Service) GetMetrics() []*google_api3.MetricDescriptor {
if m != nil {
return m.Metrics
}
return nil
}
func (m *Service) GetMonitoredResources() []*google_api6.MonitoredResourceDescriptor {
if m != nil {
return m.MonitoredResources
}
return nil
}
func (m *Service) GetLogging() *Logging {
if m != nil {
return m.Logging
}
return nil
}
func (m *Service) GetMonitoring() *Monitoring {
if m != nil {
return m.Monitoring
}
return nil
}
func (m *Service) GetSystemParameters() *SystemParameters {
if m != nil {
return m.SystemParameters
}
return nil
}
func (m *Service) GetSourceInfo() *SourceInfo {
if m != nil {
return m.SourceInfo
}
return nil
}
func (m *Service) GetExperimental() *google_api5.Experimental {
if m != nil {
return m.Experimental
}
return nil
}
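// Illustrative sketch, not part of the generated code: a minimal Service
// value mirroring the yaml example in the Service comment above. Sub-sections
// such as Authentication and Quota are omitted, and the producer project id
// is hypothetical.
var exampleService = &Service{
	ConfigVersion:     &google_protobuf5.UInt32Value{Value: 3},
	Name:              "calendar.googleapis.com",
	Title:             "Google Calendar API",
	ProducerProjectId: "my-producer-project",
	Apis:              []*google_protobuf4.Api{{Name: "google.calendar.v3.Calendar"}},
}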
func init() {
proto.RegisterType((*Service)(nil), "google.api.Service")
}
func init() { proto.RegisterFile("google/api/service.proto", fileDescriptor12) }
var fileDescriptor12 = []byte{
// 809 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x95, 0xdd, 0x6e, 0xdb, 0x36,
0x14, 0x80, 0x61, 0xd7, 0x6e, 0x66, 0x3a, 0xcd, 0x1a, 0xc6, 0x49, 0x19, 0xd7, 0x1b, 0xd2, 0xfd,
0xa0, 0xc6, 0x86, 0xca, 0x80, 0x0b, 0x74, 0x17, 0x1b, 0x30, 0xc4, 0x6d, 0xb0, 0x19, 0xe8, 0x00,
0x8f, 0x59, 0x8b, 0x61, 0x37, 0x06, 0x2d, 0xd1, 0x0a, 0x37, 0x89, 0xe4, 0x48, 0x2a, 0x8b, 0x5f,
0x67, 0xcf, 0xb6, 0x07, 0x19, 0x44, 0x52, 0x31, 0x65, 0x39, 0x77, 0xd6, 0xf9, 0xbe, 0x73, 0x7c,
0x28, 0x92, 0x47, 0x00, 0xa5, 0x42, 0xa4, 0x19, 0x9d, 0x10, 0xc9, 0x26, 0x9a, 0xaa, 0x5b, 0x16,
0xd3, 0x48, 0x2a, 0x61, 0x04, 0x04, 0x8e, 0x44, 0x44, 0xb2, 0xe1, 0x28, 0xb0, 0x08, 0xe7, 0xc2,
0x10, 0xc3, 0x04, 0xd7, 0xce, 0x1c, 0x9e, 0x86, 0xb4, 0x30, 0x37, 0x3e, 0x1c, 0x96, 0x5e, 0x91,
0xf8, 0x2f, 0xca, 0x93, 0x3d, 0x24, 0x16, 0xdc, 0xd0, 0x3b, 0xf3, 0x00, 0x51, 0x22, 0xf3, 0xe4,
0xf3, 0x80, 0x24, 0x22, 0x2e, 0x72, 0xca, 0x5d, 0x17, 0x9e, 0x9f, 0x07, 0x9c, 0xf2, 0x44, 0x0a,
0xc6, 0xab, 0xa2, 0xdf, 0x84, 0xe8, 0x4e, 0x52, 0xc5, 0x6c, 0x72, 0x56, 0x7b, 0xd8, 0xb3, 0x96,
0x1b, 0x63, 0xa4, 0x0f, 0x9f, 0x05, 0xe1, 0x8c, 0xac, 0x68, 0xa5, 0x0f, 0xc2, 0xb8, 0x48, 0xf7,
0xac, 0x22, 0x13, 0x69, 0xca, 0x78, 0x45, 0x9e, 0x05, 0x24, 0xa7, 0x46, 0xb1, 0xd8, 0x83, 0x2f,
0x43, 0x20, 0x38, 0x33, 0x42, 0xd1, 0x64, 0xa9, 0xa8, 0x16, 0x85, 0xaa, 0xb6, 0x64, 0xf8, 0xbc,
0x29, 0x6d, 0x4b, 0x87, 0x2d, 0xfe, 0x5d, 0x08, 0x43, 0x7c, 0x3c, 0xdc, 0x3b, 0x57, 0x6d, 0xc9,
0xf8, 0x5a, 0x78, 0xfa, 0x22, 0xa4, 0x1b, 0x6d, 0x68, 0xbe, 0x94, 0x44, 0x91, 0x9c, 0x1a, 0xaa,
0xf6, 0x14, 0x2e, 0x34, 0x49, 0xe9, 0xce, 0x1b, 0xb7, 0x4f, 0xab, 0x62, 0x3d, 0x21, 0x7c, 0xf3,
0x20, 0x92, 0xcc, 0xa3, 0xe1, 0x2e, 0x32, 0x1b, 0x49, 0x77, 0xf6, 0xf8, 0x9e, 0xfd, 0xa3, 0x88,
0x94, 0x54, 0xf9, 0x83, 0xf6, 0xc5, 0x7f, 0x3d, 0x70, 0x70, 0xed, 0x0e, 0x29, 0x7c, 0x0b, 0x8e,
0x62, 0xc1, 0xd7, 0x2c, 0x5d, 0xde, 0x52, 0xa5, 0x99, 0xe0, 0x68, 0x70, 0xd1, 0x1a, 0xf7, 0xa7,
0xa3, 0xc8, 0x9f, 0xdb, 0xaa, 0x48, 0xf4, 0x61, 0xce, 0xcd, 0xeb, 0xe9, 0x47, 0x92, 0x15, 0x14,
0x3f, 0x71, 0x39, 0x1f, 0x5d, 0x0a, 0x84, 0xa0, 0xc3, 0x49, 0x4e, 0x51, 0xeb, 0xa2, 0x35, 0xee,
0x61, 0xfb, 0x1b, 0x1e, 0x81, 0x36, 0x4b, 0xd0, 0x0b, 0x1b, 0x69, 0xb3, 0x04, 0x0e, 0x40, 0xd7,
0x30, 0x93, 0x51, 0xd4, 0xb6, 0x21, 0xf7, 0x00, 0x23, 0x70, 0x22, 0x95, 0x48, 0x8a, 0x98, 0xaa,
0xa5, 0x54, 0xe2, 0x4f, 0x1a, 0x9b, 0x25, 0x4b, 0xd0, 0x99, 0x75, 0x8e, 0x2b, 0xb4, 0x70, 0x64,
0x9e, 0xc0, 0x31, 0xe8, 0x10, 0xc9, 0x34, 0x7a, 0x74, 0xf1, 0x68, 0xdc, 0x9f, 0x0e, 0x1a, 0x4d,
0x5e, 0x4a, 0x86, 0xad, 0x01, 0xbf, 0x05, 0xdd, 0xf2, 0x95, 0x68, 0xd4, 0xb1, 0xea, 0x69, 0x43,
0xfd, 0x6d, 0x23, 0x29, 0x76, 0x4e, 0x29, 0x53, 0x5e, 0xe4, 0x1a, 0x75, 0x1f, 0x90, 0xaf, 0x78,
0x91, 0x63, 0xe7, 0xc0, 0x1f, 0xc1, 0x93, 0xda, 0xcd, 0x41, 0x8f, 0xed, 0x1b, 0x3b, 0x8f, 0xb6,
0x37, 0x3d, 0x7a, 0x17, 0x0a, 0xb8, 0xee, 0xc3, 0x57, 0xe0, 0xc0, 0x5f, 0x64, 0xf4, 0x89, 0x4d,
0x3d, 0x09, 0x53, 0x67, 0x0e, 0xe1, 0xca, 0x81, 0x5f, 0x81, 0x4e, 0x79, 0x85, 0x50, 0xcf, 0xba,
0x4f, 0x43, 0xf7, 0x67, 0x63, 0x24, 0xb6, 0x14, 0xbe, 0x04, 0x5d, 0x7b, 0x5c, 0x11, 0xb0, 0xda,
0x71, 0xa8, 0xfd, 0x5a, 0x02, 0xec, 0x38, 0x9c, 0x81, 0xa3, 0x72, 0xba, 0x50, 0x6e, 0x58, 0xec,
0xfa, 0xef, 0xdb, 0x8c, 0x61, 0x98, 0x71, 0x59, 0x33, 0xf0, 0x4e, 0x46, 0xb9, 0x02, 0x3f, 0x70,
0xd0, 0x61, 0x73, 0x05, 0x6f, 0x1d, 0xc2, 0x95, 0x53, 0xf6, 0x66, 0x4f, 0x3c, 0xfa, 0xb4, 0xd9,
0xdb, 0x87, 0x12, 0x60, 0xc7, 0xe1, 0x14, 0xf4, 0xaa, 0xa1, 0xa3, 0x11, 0xac, 0xef, 0x71, 0x29,
0x5f, 0x79, 0x88, 0xb7, 0x5a, 0xd5, 0x8b, 0x12, 0x19, 0x3a, 0xdd, 0xdf, 0x8b, 0x12, 0x19, 0xae,
0x1c, 0xf8, 0x0a, 0x74, 0x32, 0x91, 0x6a, 0xf4, 0xcc, 0x56, 0xaf, 0x6d, 0xda, 0x7b, 0x91, 0xbe,
0xa3, 0x3a, 0x56, 0x4c, 0x1a, 0xa1, 0xb0, 0xd5, 0xe0, 0x1b, 0x70, 0xe0, 0x06, 0x8c, 0x46, 0xc8,
0x66, 0x8c, 0xc2, 0x8c, 0x5f, 0x2c, 0x0a, 0x92, 0x2a, 0x19, 0xfe, 0x0e, 0x4e, 0x9a, 0xf3, 0x47,
0xa3, 0x73, 0x5b, 0xe3, 0x65, 0xad, 0x46, 0xa5, 0x61, 0x6f, 0x05, 0xe5, 0x60, 0xbe, 0x0b, 0xed,
0x7a, 0xfd, 0x30, 0x44, 0xcf, 0x9b, 0xeb, 0x7d, 0xef, 0x10, 0xae, 0x1c, 0xf8, 0x06, 0x80, 0xed,
0x8c, 0x43, 0x23, 0x9b, 0x71, 0xb6, 0xe7, 0xff, 0xcb, 0xa4, 0xc0, 0x84, 0x73, 0x70, 0xbc, 0x3b,
0xc8, 0x34, 0xfa, 0xac, 0x3e, 0x1b, 0xca, 0xf4, 0x6b, 0x2b, 0x2d, 0xee, 0x1d, 0xfc, 0x54, 0xef,
0x44, 0xe0, 0x77, 0xa0, 0x1f, 0x4c, 0x4c, 0xf4, 0x75, 0xb3, 0x87, 0x6b, 0x8b, 0xe7, 0x7c, 0x2d,
0x30, 0xd0, 0xf7, 0xbf, 0xe1, 0x0f, 0xe0, 0x30, 0xfc, 0xb6, 0x20, 0x6a, 0x33, 0x51, 0xed, 0x44,
0x04, 0x1c, 0xd7, 0xec, 0x19, 0x2f, 0x47, 0x5b, 0x1e, 0xc8, 0xb3, 0x43, 0x3f, 0xf5, 0x16, 0xe5,
0xb5, 0x5e, 0xb4, 0xfe, 0xb8, 0xf2, 0x2c, 0x15, 0x19, 0xe1, 0x69, 0x24, 0x54, 0x3a, 0x49, 0x29,
0xb7, 0x97, 0x7e, 0xe2, 0x50, 0x39, 0x4a, 0xc2, 0x8f, 0xba, 0x9b, 0x7b, 0xdf, 0xd7, 0x9e, 0xfe,
0x6d, 0x77, 0x7e, 0xba, 0x5c, 0xcc, 0x57, 0x8f, 0x6d, 0xe2, 0xeb, 0xff, 0x03, 0x00, 0x00, 0xff,
0xff, 0xcc, 0xae, 0xb3, 0x8f, 0x0c, 0x08, 0x00, 0x00,
}


@ -0,0 +1,55 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/source_info.proto
package serviceconfig
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf1 "github.com/golang/protobuf/ptypes/any"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Source information used to create a Service Config
type SourceInfo struct {
// All files used during config generation.
SourceFiles []*google_protobuf1.Any `protobuf:"bytes,1,rep,name=source_files,json=sourceFiles" json:"source_files,omitempty"`
}
func (m *SourceInfo) Reset() { *m = SourceInfo{} }
func (m *SourceInfo) String() string { return proto.CompactTextString(m) }
func (*SourceInfo) ProtoMessage() {}
func (*SourceInfo) Descriptor() ([]byte, []int) { return fileDescriptor13, []int{0} }
func (m *SourceInfo) GetSourceFiles() []*google_protobuf1.Any {
if m != nil {
return m.SourceFiles
}
return nil
}
func init() {
proto.RegisterType((*SourceInfo)(nil), "google.api.SourceInfo")
}
func init() { proto.RegisterFile("google/api/source_info.proto", fileDescriptor13) }
var fileDescriptor13 = []byte{
// 198 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x2f, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x8d, 0xcf, 0xcc,
0x4b, 0xcb, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64,
0x4a, 0x49, 0x42, 0x55, 0x82, 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94,
0x5c, 0xb9, 0xb8, 0x82, 0xc1, 0x7a, 0x3d, 0xf3, 0xd2, 0xf2, 0x85, 0xcc, 0xb9, 0x78, 0xa0, 0x26,
0xa5, 0x65, 0xe6, 0xa4, 0x16, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0x89, 0xe8, 0x41, 0xcd,
0x82, 0xe9, 0xd7, 0x73, 0xcc, 0xab, 0x0c, 0xe2, 0x86, 0xa8, 0x74, 0x03, 0x29, 0x74, 0x2a, 0xe4,
0xe2, 0x4b, 0xce, 0xcf, 0xd5, 0x43, 0xd8, 0xe9, 0xc4, 0x8f, 0x30, 0x36, 0x00, 0xa4, 0x2d, 0x80,
0x31, 0xca, 0x15, 0x2a, 0x9d, 0x9e, 0x9f, 0x93, 0x98, 0x97, 0xae, 0x97, 0x5f, 0x94, 0xae, 0x9f,
0x9e, 0x9a, 0x07, 0x36, 0x54, 0x1f, 0x22, 0x95, 0x58, 0x90, 0x59, 0x0c, 0xf1, 0x4f, 0x6a, 0x51,
0x59, 0x66, 0x72, 0x6a, 0x72, 0x7e, 0x5e, 0x5a, 0x66, 0xba, 0x35, 0x0a, 0x6f, 0x11, 0x13, 0x8b,
0xbb, 0x63, 0x80, 0x67, 0x12, 0x1b, 0x58, 0xa3, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xd4, 0x78,
0x5d, 0xab, 0x07, 0x01, 0x00, 0x00,
}


@ -0,0 +1,169 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/system_parameter.proto
package serviceconfig
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// ### System parameter configuration
//
// A system parameter is a special kind of parameter defined by the API
// system, not by an individual API. It is typically mapped to an HTTP header
// and/or a URL query parameter. This configuration specifies which methods
// change the names of the system parameters.
type SystemParameters struct {
// Define system parameters.
//
// The parameters defined here will override the default parameters
// implemented by the system. If this field is missing from the service
// config, default system parameters will be used. Default system parameters
// and names are implementation-dependent.
//
// Example: define api key for all methods
//
// system_parameters
// rules:
// - selector: "*"
// parameters:
// - name: api_key
// url_query_parameter: api_key
//
//
// Example: define 2 api key names for a specific method.
//
// system_parameters
// rules:
// - selector: "/ListShelves"
// parameters:
// - name: api_key
// http_header: Api-Key1
// - name: api_key
// http_header: Api-Key2
//
// **NOTE:** All service configuration rules follow "last one wins" order.
Rules []*SystemParameterRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"`
}
func (m *SystemParameters) Reset() { *m = SystemParameters{} }
func (m *SystemParameters) String() string { return proto.CompactTextString(m) }
func (*SystemParameters) ProtoMessage() {}
func (*SystemParameters) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{0} }
func (m *SystemParameters) GetRules() []*SystemParameterRule {
if m != nil {
return m.Rules
}
return nil
}
// Define a system parameter rule mapping system parameter definitions to
// methods.
type SystemParameterRule struct {
// Selects the methods to which this rule applies. Use '*' to indicate all
// methods in all APIs.
//
// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
// Define parameters. Multiple names may be defined for a parameter.
// For a given method call, only one of them should be used. If multiple
// names are used, the behavior is implementation-dependent.
// If none of the specified names are present, the behavior is
// parameter-dependent.
Parameters []*SystemParameter `protobuf:"bytes,2,rep,name=parameters" json:"parameters,omitempty"`
}
func (m *SystemParameterRule) Reset() { *m = SystemParameterRule{} }
func (m *SystemParameterRule) String() string { return proto.CompactTextString(m) }
func (*SystemParameterRule) ProtoMessage() {}
func (*SystemParameterRule) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{1} }
func (m *SystemParameterRule) GetSelector() string {
if m != nil {
return m.Selector
}
return ""
}
func (m *SystemParameterRule) GetParameters() []*SystemParameter {
if m != nil {
return m.Parameters
}
return nil
}
// Define a parameter's name and location. The parameter may be passed as either
// an HTTP header or a URL query parameter, and if both are passed the behavior
// is implementation-dependent.
type SystemParameter struct {
// Define the name of the parameter, such as "api_key". It is case sensitive.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// Define the HTTP header name to use for the parameter. It is case
// insensitive.
HttpHeader string `protobuf:"bytes,2,opt,name=http_header,json=httpHeader" json:"http_header,omitempty"`
// Define the URL query parameter name to use for the parameter. It is case
// sensitive.
UrlQueryParameter string `protobuf:"bytes,3,opt,name=url_query_parameter,json=urlQueryParameter" json:"url_query_parameter,omitempty"`
}
func (m *SystemParameter) Reset() { *m = SystemParameter{} }
func (m *SystemParameter) String() string { return proto.CompactTextString(m) }
func (*SystemParameter) ProtoMessage() {}
func (*SystemParameter) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{2} }
func (m *SystemParameter) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *SystemParameter) GetHttpHeader() string {
if m != nil {
return m.HttpHeader
}
return ""
}
func (m *SystemParameter) GetUrlQueryParameter() string {
if m != nil {
return m.UrlQueryParameter
}
return ""
}
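// Illustrative sketch, not part of the generated code: SystemParameters
// equivalent to the second yaml example above, defining two API key header
// names for a single method of a hypothetical API.
var exampleSystemParameters = &SystemParameters{
	Rules: []*SystemParameterRule{{
		Selector: "/ListShelves",
		Parameters: []*SystemParameter{
			{Name: "api_key", HttpHeader: "Api-Key1"},
			{Name: "api_key", HttpHeader: "Api-Key2"},
		},
	}},
}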
func init() {
proto.RegisterType((*SystemParameters)(nil), "google.api.SystemParameters")
proto.RegisterType((*SystemParameterRule)(nil), "google.api.SystemParameterRule")
proto.RegisterType((*SystemParameter)(nil), "google.api.SystemParameter")
}
func init() { proto.RegisterFile("google/api/system_parameter.proto", fileDescriptor14) }
var fileDescriptor14 = []byte{
// 286 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xbf, 0x4e, 0xc3, 0x30,
0x10, 0x87, 0x95, 0xb6, 0x20, 0xb8, 0x4a, 0xfc, 0x71, 0x19, 0x22, 0x18, 0x5a, 0x3a, 0x75, 0x72,
0x24, 0x10, 0x53, 0x27, 0x2a, 0x21, 0xe8, 0x16, 0xca, 0xc6, 0x12, 0x99, 0x70, 0xb8, 0x91, 0x9c,
0xd8, 0x9c, 0x9d, 0x48, 0x7d, 0x1d, 0x9e, 0x14, 0xc5, 0x29, 0x69, 0x89, 0x10, 0x9b, 0xef, 0xbe,
0xcf, 0xfa, 0x9d, 0xee, 0xe0, 0x5a, 0x6a, 0x2d, 0x15, 0x46, 0xc2, 0x64, 0x91, 0xdd, 0x58, 0x87,
0x79, 0x62, 0x04, 0x89, 0x1c, 0x1d, 0x12, 0x37, 0xa4, 0x9d, 0x66, 0xd0, 0x28, 0x5c, 0x98, 0x6c,
0xba, 0x84, 0xb3, 0x17, 0x6f, 0xc5, 0x3f, 0x92, 0x65, 0x77, 0x70, 0x40, 0xa5, 0x42, 0x1b, 0x06,
0x93, 0xfe, 0x6c, 0x78, 0x33, 0xe6, 0x3b, 0x9f, 0x77, 0xe4, 0x55, 0xa9, 0x70, 0xd5, 0xd8, 0xd3,
0x02, 0x46, 0x7f, 0x50, 0x76, 0x09, 0x47, 0x16, 0x15, 0xa6, 0x4e, 0x53, 0x18, 0x4c, 0x82, 0xd9,
0xf1, 0xaa, 0xad, 0xd9, 0x1c, 0xa0, 0x1d, 0xce, 0x86, 0x3d, 0x1f, 0x77, 0xf5, 0x5f, 0xdc, 0x9e,
0x3e, 0xad, 0xe0, 0xb4, 0x83, 0x19, 0x83, 0x41, 0x21, 0x72, 0xdc, 0xe6, 0xf8, 0x37, 0x1b, 0xc3,
0x70, 0xed, 0x9c, 0x49, 0xd6, 0x28, 0xde, 0x91, 0xc2, 0x9e, 0x47, 0x50, 0xb7, 0x9e, 0x7c, 0x87,
0x71, 0x18, 0x95, 0xa4, 0x92, 0xcf, 0x12, 0x69, 0xb3, 0xdb, 0x55, 0xd8, 0xf7, 0xe2, 0x79, 0x49,
0xea, 0xb9, 0x26, 0x6d, 0xc8, 0xa2, 0x82, 0x93, 0x54, 0xe7, 0x7b, 0x53, 0x2e, 0x2e, 0x3a, 0x73,
0xc4, 0xf5, 0x9a, 0xe3, 0xe0, 0xf5, 0x61, 0xeb, 0x48, 0xad, 0x44, 0x21, 0xb9, 0x26, 0x19, 0x49,
0x2c, 0xfc, 0x11, 0xa2, 0x06, 0x09, 0x93, 0xd9, 0xe6, 0x54, 0x48, 0x55, 0x96, 0x62, 0xaa, 0x8b,
0x8f, 0x4c, 0xce, 0x7f, 0x55, 0x5f, 0xbd, 0xc1, 0xe3, 0x7d, 0xbc, 0x7c, 0x3b, 0xf4, 0x1f, 0x6f,
0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x5e, 0xdf, 0x2e, 0x09, 0xe2, 0x01, 0x00, 0x00,
}


@ -0,0 +1,145 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/api/usage.proto
package serviceconfig
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Configuration controlling usage of a service.
type Usage struct {
// Requirements that must be satisfied before a consumer project can use the
// service. Each requirement is of the form <service.name>/<requirement-id>;
// for example 'serviceusage.googleapis.com/billing-enabled'.
Requirements []string `protobuf:"bytes,1,rep,name=requirements" json:"requirements,omitempty"`
// A list of usage rules that apply to individual API methods.
//
// **NOTE:** All service configuration rules follow "last one wins" order.
Rules []*UsageRule `protobuf:"bytes,6,rep,name=rules" json:"rules,omitempty"`
// The full resource name of a channel used for sending notifications to the
// service producer.
//
// Google Service Management currently only supports
// [Google Cloud Pub/Sub](https://cloud.google.com/pubsub) as a notification
// channel. To use Google Cloud Pub/Sub as the channel, this must be the name
// of a Cloud Pub/Sub topic that uses the Cloud Pub/Sub topic name format
// documented in https://cloud.google.com/pubsub/docs/overview.
ProducerNotificationChannel string `protobuf:"bytes,7,opt,name=producer_notification_channel,json=producerNotificationChannel" json:"producer_notification_channel,omitempty"`
}
func (m *Usage) Reset() { *m = Usage{} }
func (m *Usage) String() string { return proto.CompactTextString(m) }
func (*Usage) ProtoMessage() {}
func (*Usage) Descriptor() ([]byte, []int) { return fileDescriptor15, []int{0} }
func (m *Usage) GetRequirements() []string {
if m != nil {
return m.Requirements
}
return nil
}
func (m *Usage) GetRules() []*UsageRule {
if m != nil {
return m.Rules
}
return nil
}
func (m *Usage) GetProducerNotificationChannel() string {
if m != nil {
return m.ProducerNotificationChannel
}
return ""
}
// Usage configuration rules for the service.
//
// NOTE: Under development.
//
//
// Use this rule to configure unregistered calls for the service. Unregistered
// calls are calls that do not contain consumer project identity.
// (Example: calls that do not contain an API key).
// By default, API methods do not allow unregistered calls, and each method call
// must be identified by a consumer project identity. Use this rule to
// allow/disallow unregistered calls.
//
// Example of an API that wants to allow unregistered calls for the entire service.
//
// usage:
// rules:
// - selector: "*"
// allow_unregistered_calls: true
//
// Example of a method that wants to allow unregistered calls.
//
// usage:
// rules:
// - selector: "google.example.library.v1.LibraryService.CreateBook"
// allow_unregistered_calls: true
type UsageRule struct {
// Selects the methods to which this rule applies. Use '*' to indicate all
// methods in all APIs.
//
// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
Selector string `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
// True, if the method allows unregistered calls; false otherwise.
AllowUnregisteredCalls bool `protobuf:"varint,2,opt,name=allow_unregistered_calls,json=allowUnregisteredCalls" json:"allow_unregistered_calls,omitempty"`
}
func (m *UsageRule) Reset() { *m = UsageRule{} }
func (m *UsageRule) String() string { return proto.CompactTextString(m) }
func (*UsageRule) ProtoMessage() {}
func (*UsageRule) Descriptor() ([]byte, []int) { return fileDescriptor15, []int{1} }
func (m *UsageRule) GetSelector() string {
if m != nil {
return m.Selector
}
return ""
}
func (m *UsageRule) GetAllowUnregisteredCalls() bool {
if m != nil {
return m.AllowUnregisteredCalls
}
return false
}
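// Illustrative sketch, not part of the generated code: a Usage configuration
// that requires billing to be enabled and allows unregistered calls to one
// method, following the examples in the comments above. The requirement and
// selector values are the hypothetical ones from those examples.
var exampleUsage = &Usage{
	Requirements: []string{"serviceusage.googleapis.com/billing-enabled"},
	Rules: []*UsageRule{{
		Selector:               "google.example.library.v1.LibraryService.CreateBook",
		AllowUnregisteredCalls: true,
	}},
}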
func init() {
proto.RegisterType((*Usage)(nil), "google.api.Usage")
proto.RegisterType((*UsageRule)(nil), "google.api.UsageRule")
}
func init() { proto.RegisterFile("google/api/usage.proto", fileDescriptor15) }
var fileDescriptor15 = []byte{
// 303 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0xcf, 0x4b, 0x3b, 0x31,
0x10, 0xc5, 0x49, 0xfb, 0x6d, 0xbf, 0x6d, 0x14, 0x0f, 0x01, 0x4b, 0xa8, 0x0a, 0x4b, 0x4f, 0x0b,
0xc2, 0x2e, 0xe8, 0x45, 0xf0, 0x64, 0x8b, 0x88, 0x17, 0x29, 0x0b, 0xbd, 0x78, 0x59, 0x62, 0x3a,
0x8d, 0x81, 0x34, 0xb3, 0xe6, 0x87, 0xfe, 0x33, 0x9e, 0xfc, 0x4b, 0xa5, 0x59, 0xad, 0xdb, 0xe3,
0xcc, 0xfb, 0xbc, 0x79, 0xcc, 0xa3, 0x13, 0x85, 0xa8, 0x0c, 0x94, 0xa2, 0xd1, 0x65, 0xf4, 0x42,
0x41, 0xd1, 0x38, 0x0c, 0xc8, 0x68, 0xbb, 0x2f, 0x44, 0xa3, 0xa7, 0xe7, 0x1d, 0x46, 0x58, 0x8b,
0x41, 0x04, 0x8d, 0xd6, 0xb7, 0xe4, 0xec, 0x93, 0xd0, 0xc1, 0x6a, 0xe7, 0x64, 0x33, 0x7a, 0xec,
0xe0, 0x2d, 0x6a, 0x07, 0x5b, 0xb0, 0xc1, 0x73, 0x92, 0xf5, 0xf3, 0x71, 0x75, 0xb0, 0x63, 0x97,
0x74, 0xe0, 0xa2, 0x01, 0xcf, 0x87, 0x59, 0x3f, 0x3f, 0xba, 0x3a, 0x2d, 0xfe, 0x72, 0x8a, 0x74,
0xa5, 0x8a, 0x06, 0xaa, 0x96, 0x61, 0x73, 0x7a, 0xd1, 0x38, 0x5c, 0x47, 0x09, 0xae, 0xb6, 0x18,
0xf4, 0x46, 0xcb, 0x14, 0x5d, 0xcb, 0x57, 0x61, 0x2d, 0x18, 0xfe, 0x3f, 0x23, 0xf9, 0xb8, 0x3a,
0xfb, 0x85, 0x9e, 0x3a, 0xcc, 0xa2, 0x45, 0x66, 0x82, 0x8e, 0xf7, 0x77, 0xd9, 0x94, 0x8e, 0x3c,
0x18, 0x90, 0x01, 0x1d, 0x27, 0xc9, 0xbb, 0x9f, 0xd9, 0x0d, 0xe5, 0xc2, 0x18, 0xfc, 0xa8, 0xa3,
0x75, 0xa0, 0xb4, 0x0f, 0xe0, 0x60, 0x5d, 0x4b, 0x61, 0x8c, 0xe7, 0xbd, 0x8c, 0xe4, 0xa3, 0x6a,
0x92, 0xf4, 0x55, 0x47, 0x5e, 0xec, 0xd4, 0xb9, 0xa1, 0x27, 0x12, 0xb7, 0x9d, 0x4f, 0xe6, 0x34,
0x45, 0x2e, 0x77, 0xfd, 0x2c, 0xc9, 0xf3, 0xfd, 0x8f, 0xa2, 0xd0, 0x08, 0xab, 0x0a, 0x74, 0xaa,
0x54, 0x60, 0x53, 0x7b, 0x65, 0x2b, 0x89, 0x46, 0xfb, 0x54, 0xaf, 0x07, 0xf7, 0xae, 0x25, 0x48,
0xb4, 0x1b, 0xad, 0x6e, 0x0f, 0xa6, 0xaf, 0xde, 0xbf, 0x87, 0xbb, 0xe5, 0xe3, 0xcb, 0x30, 0x19,
0xaf, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x8d, 0xba, 0x6f, 0x72, 0xba, 0x01, 0x00, 0x00,
}


@ -0,0 +1,921 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/bigtable/admin/v2/bigtable_instance_admin.proto
/*
Package admin is a generated protocol buffer package.
It is generated from these files:
google/bigtable/admin/v2/bigtable_instance_admin.proto
google/bigtable/admin/v2/bigtable_table_admin.proto
google/bigtable/admin/v2/common.proto
google/bigtable/admin/v2/instance.proto
google/bigtable/admin/v2/table.proto
It has these top-level messages:
CreateInstanceRequest
GetInstanceRequest
ListInstancesRequest
ListInstancesResponse
DeleteInstanceRequest
CreateClusterRequest
GetClusterRequest
ListClustersRequest
ListClustersResponse
DeleteClusterRequest
CreateInstanceMetadata
UpdateClusterMetadata
CreateTableRequest
DropRowRangeRequest
ListTablesRequest
ListTablesResponse
GetTableRequest
DeleteTableRequest
ModifyColumnFamiliesRequest
Instance
Cluster
Table
ColumnFamily
GcRule
*/
package admin
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import google_longrunning "google.golang.org/genproto/googleapis/longrunning"
import google_protobuf3 "github.com/golang/protobuf/ptypes/empty"
import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Request message for BigtableInstanceAdmin.CreateInstance.
type CreateInstanceRequest struct {
// The unique name of the project in which to create the new instance.
// Values are of the form `projects/<project>`.
Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"`
// The ID to be used when referring to the new instance within its project,
// e.g., just `myinstance` rather than
// `projects/myproject/instances/myinstance`.
InstanceId string `protobuf:"bytes,2,opt,name=instance_id,json=instanceId" json:"instance_id,omitempty"`
// The instance to create.
// Fields marked `OutputOnly` must be left blank.
Instance *Instance `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"`
// The clusters to be created within the instance, mapped by desired
// cluster ID, e.g., just `mycluster` rather than
// `projects/myproject/instances/myinstance/clusters/mycluster`.
// Fields marked `OutputOnly` must be left blank.
// Currently exactly one cluster must be specified.
Clusters map[string]*Cluster `protobuf:"bytes,4,rep,name=clusters" json:"clusters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *CreateInstanceRequest) Reset() { *m = CreateInstanceRequest{} }
func (m *CreateInstanceRequest) String() string { return proto.CompactTextString(m) }
func (*CreateInstanceRequest) ProtoMessage() {}
func (*CreateInstanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *CreateInstanceRequest) GetParent() string {
if m != nil {
return m.Parent
}
return ""
}
func (m *CreateInstanceRequest) GetInstanceId() string {
if m != nil {
return m.InstanceId
}
return ""
}
func (m *CreateInstanceRequest) GetInstance() *Instance {
if m != nil {
return m.Instance
}
return nil
}
func (m *CreateInstanceRequest) GetClusters() map[string]*Cluster {
if m != nil {
return m.Clusters
}
return nil
}
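// Illustrative sketch, not part of the generated code: a CreateInstanceRequest
// for a hypothetical project and instance ID, with a single cluster as the
// comment above requires. The Instance and Cluster messages are defined in
// instance.pb.go and are left empty here.
var exampleCreateInstanceRequest = &CreateInstanceRequest{
	Parent:     "projects/myproject",
	InstanceId: "myinstance",
	Instance:   &Instance{},
	Clusters:   map[string]*Cluster{"mycluster": {}},
}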
// Request message for BigtableInstanceAdmin.GetInstance.
type GetInstanceRequest struct {
// The unique name of the requested instance. Values are of the form
// `projects/<project>/instances/<instance>`.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}
func (m *GetInstanceRequest) Reset() { *m = GetInstanceRequest{} }
func (m *GetInstanceRequest) String() string { return proto.CompactTextString(m) }
func (*GetInstanceRequest) ProtoMessage() {}
func (*GetInstanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *GetInstanceRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
// Request message for BigtableInstanceAdmin.ListInstances.
type ListInstancesRequest struct {
// The unique name of the project for which a list of instances is requested.
// Values are of the form `projects/<project>`.
Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"`
// The value of `next_page_token` returned by a previous call.
PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
}
func (m *ListInstancesRequest) Reset() { *m = ListInstancesRequest{} }
func (m *ListInstancesRequest) String() string { return proto.CompactTextString(m) }
func (*ListInstancesRequest) ProtoMessage() {}
func (*ListInstancesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *ListInstancesRequest) GetParent() string {
if m != nil {
return m.Parent
}
return ""
}
func (m *ListInstancesRequest) GetPageToken() string {
if m != nil {
return m.PageToken
}
return ""
}
// Response message for BigtableInstanceAdmin.ListInstances.
type ListInstancesResponse struct {
// The list of requested instances.
Instances []*Instance `protobuf:"bytes,1,rep,name=instances" json:"instances,omitempty"`
// Locations from which Instance information could not be retrieved,
// due to an outage or some other transient condition.
// Instances whose Clusters are all in one of the failed locations
// may be missing from `instances`, and Instances with at least one
// Cluster in a failed location may only have partial information returned.
FailedLocations []string `protobuf:"bytes,2,rep,name=failed_locations,json=failedLocations" json:"failed_locations,omitempty"`
// Set if not all instances could be returned in a single response.
// Pass this value to `page_token` in another request to get the next
// page of results.
NextPageToken string `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
}
func (m *ListInstancesResponse) Reset() { *m = ListInstancesResponse{} }
func (m *ListInstancesResponse) String() string { return proto.CompactTextString(m) }
func (*ListInstancesResponse) ProtoMessage() {}
func (*ListInstancesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *ListInstancesResponse) GetInstances() []*Instance {
if m != nil {
return m.Instances
}
return nil
}
func (m *ListInstancesResponse) GetFailedLocations() []string {
if m != nil {
return m.FailedLocations
}
return nil
}
func (m *ListInstancesResponse) GetNextPageToken() string {
if m != nil {
return m.NextPageToken
}
return ""
}
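// listAllInstances is an illustrative helper, not part of the generated code:
// it shows how NextPageToken from a ListInstancesResponse is fed back as the
// PageToken of the following ListInstancesRequest. It assumes the generated
// BigtableInstanceAdminClient (declared further below) exposes ListInstances
// in the usual gRPC client form.
func listAllInstances(ctx context.Context, c BigtableInstanceAdminClient, parent string) ([]*Instance, error) {
	var all []*Instance
	req := &ListInstancesRequest{Parent: parent}
	for {
		resp, err := c.ListInstances(ctx, req)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Instances...)
		if resp.NextPageToken == "" {
			return all, nil
		}
		req.PageToken = resp.NextPageToken
	}
}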
// Request message for BigtableInstanceAdmin.DeleteInstance.
type DeleteInstanceRequest struct {
// The unique name of the instance to be deleted.
// Values are of the form `projects/<project>/instances/<instance>`.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}
func (m *DeleteInstanceRequest) Reset() { *m = DeleteInstanceRequest{} }
func (m *DeleteInstanceRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteInstanceRequest) ProtoMessage() {}
func (*DeleteInstanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *DeleteInstanceRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
// Request message for BigtableInstanceAdmin.CreateCluster.
type CreateClusterRequest struct {
// The unique name of the instance in which to create the new cluster.
// Values are of the form
// `projects/<project>/instances/<instance>`.
Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"`
// The ID to be used when referring to the new cluster within its instance,
// e.g., just `mycluster` rather than
// `projects/myproject/instances/myinstance/clusters/mycluster`.
ClusterId string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"`
// The cluster to be created.
// Fields marked `OutputOnly` must be left blank.
Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster" json:"cluster,omitempty"`
}
func (m *CreateClusterRequest) Reset() { *m = CreateClusterRequest{} }
func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) }
func (*CreateClusterRequest) ProtoMessage() {}
func (*CreateClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *CreateClusterRequest) GetParent() string {
if m != nil {
return m.Parent
}
return ""
}
func (m *CreateClusterRequest) GetClusterId() string {
if m != nil {
return m.ClusterId
}
return ""
}
func (m *CreateClusterRequest) GetCluster() *Cluster {
if m != nil {
return m.Cluster
}
return nil
}
// Request message for BigtableInstanceAdmin.GetCluster.
type GetClusterRequest struct {
// The unique name of the requested cluster. Values are of the form
// `projects/<project>/instances/<instance>/clusters/<cluster>`.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}
func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} }
func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) }
func (*GetClusterRequest) ProtoMessage() {}
func (*GetClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *GetClusterRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
// Request message for BigtableInstanceAdmin.ListClusters.
type ListClustersRequest struct {
// The unique name of the instance for which a list of clusters is requested.
// Values are of the form `projects/<project>/instances/<instance>`.
// Use `<instance> = '-'` to list Clusters for all Instances in a project,
// e.g., `projects/myproject/instances/-`.
Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"`
// The value of `next_page_token` returned by a previous call.
PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
}
func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} }
func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) }
func (*ListClustersRequest) ProtoMessage() {}
func (*ListClustersRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *ListClustersRequest) GetParent() string {
if m != nil {
return m.Parent
}
return ""
}
func (m *ListClustersRequest) GetPageToken() string {
if m != nil {
return m.PageToken
}
return ""
}
// Response message for BigtableInstanceAdmin.ListClusters.
type ListClustersResponse struct {
// The list of requested clusters.
Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters" json:"clusters,omitempty"`
// Locations from which Cluster information could not be retrieved,
// due to an outage or some other transient condition.
// Clusters from these locations may be missing from `clusters`,
// or may only have partial information returned.
FailedLocations []string `protobuf:"bytes,2,rep,name=failed_locations,json=failedLocations" json:"failed_locations,omitempty"`
// Set if not all clusters could be returned in a single response.
// Pass this value to `page_token` in another request to get the next
// page of results.
NextPageToken string `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
}
func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} }
func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) }
func (*ListClustersResponse) ProtoMessage() {}
func (*ListClustersResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func (m *ListClustersResponse) GetClusters() []*Cluster {
if m != nil {
return m.Clusters
}
return nil
}
func (m *ListClustersResponse) GetFailedLocations() []string {
if m != nil {
return m.FailedLocations
}
return nil
}
func (m *ListClustersResponse) GetNextPageToken() string {
if m != nil {
return m.NextPageToken
}
return ""
}
// Request message for BigtableInstanceAdmin.DeleteCluster.
type DeleteClusterRequest struct {
// The unique name of the cluster to be deleted. Values are of the form
// `projects/<project>/instances/<instance>/clusters/<cluster>`.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}
func (m *DeleteClusterRequest) Reset() { *m = DeleteClusterRequest{} }
func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteClusterRequest) ProtoMessage() {}
func (*DeleteClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
func (m *DeleteClusterRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
// The metadata for the Operation returned by CreateInstance.
type CreateInstanceMetadata struct {
// The request that prompted the initiation of this CreateInstance operation.
OriginalRequest *CreateInstanceRequest `protobuf:"bytes,1,opt,name=original_request,json=originalRequest" json:"original_request,omitempty"`
// The time at which the original request was received.
RequestTime *google_protobuf1.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime" json:"request_time,omitempty"`
// The time at which the operation failed or was completed successfully.
FinishTime *google_protobuf1.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime" json:"finish_time,omitempty"`
}
func (m *CreateInstanceMetadata) Reset() { *m = CreateInstanceMetadata{} }
func (m *CreateInstanceMetadata) String() string { return proto.CompactTextString(m) }
func (*CreateInstanceMetadata) ProtoMessage() {}
func (*CreateInstanceMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
func (m *CreateInstanceMetadata) GetOriginalRequest() *CreateInstanceRequest {
if m != nil {
return m.OriginalRequest
}
return nil
}
func (m *CreateInstanceMetadata) GetRequestTime() *google_protobuf1.Timestamp {
if m != nil {
return m.RequestTime
}
return nil
}
func (m *CreateInstanceMetadata) GetFinishTime() *google_protobuf1.Timestamp {
if m != nil {
return m.FinishTime
}
return nil
}
// The metadata for the Operation returned by UpdateCluster.
type UpdateClusterMetadata struct {
// The request that prompted the initiation of this UpdateCluster operation.
OriginalRequest *Cluster `protobuf:"bytes,1,opt,name=original_request,json=originalRequest" json:"original_request,omitempty"`
// The time at which the original request was received.
RequestTime *google_protobuf1.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime" json:"request_time,omitempty"`
// The time at which the operation failed or was completed successfully.
FinishTime *google_protobuf1.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime" json:"finish_time,omitempty"`
}
func (m *UpdateClusterMetadata) Reset() { *m = UpdateClusterMetadata{} }
func (m *UpdateClusterMetadata) String() string { return proto.CompactTextString(m) }
func (*UpdateClusterMetadata) ProtoMessage() {}
func (*UpdateClusterMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
func (m *UpdateClusterMetadata) GetOriginalRequest() *Cluster {
if m != nil {
return m.OriginalRequest
}
return nil
}
func (m *UpdateClusterMetadata) GetRequestTime() *google_protobuf1.Timestamp {
if m != nil {
return m.RequestTime
}
return nil
}
func (m *UpdateClusterMetadata) GetFinishTime() *google_protobuf1.Timestamp {
if m != nil {
return m.FinishTime
}
return nil
}
func init() {
proto.RegisterType((*CreateInstanceRequest)(nil), "google.bigtable.admin.v2.CreateInstanceRequest")
proto.RegisterType((*GetInstanceRequest)(nil), "google.bigtable.admin.v2.GetInstanceRequest")
proto.RegisterType((*ListInstancesRequest)(nil), "google.bigtable.admin.v2.ListInstancesRequest")
proto.RegisterType((*ListInstancesResponse)(nil), "google.bigtable.admin.v2.ListInstancesResponse")
proto.RegisterType((*DeleteInstanceRequest)(nil), "google.bigtable.admin.v2.DeleteInstanceRequest")
proto.RegisterType((*CreateClusterRequest)(nil), "google.bigtable.admin.v2.CreateClusterRequest")
proto.RegisterType((*GetClusterRequest)(nil), "google.bigtable.admin.v2.GetClusterRequest")
proto.RegisterType((*ListClustersRequest)(nil), "google.bigtable.admin.v2.ListClustersRequest")
proto.RegisterType((*ListClustersResponse)(nil), "google.bigtable.admin.v2.ListClustersResponse")
proto.RegisterType((*DeleteClusterRequest)(nil), "google.bigtable.admin.v2.DeleteClusterRequest")
proto.RegisterType((*CreateInstanceMetadata)(nil), "google.bigtable.admin.v2.CreateInstanceMetadata")
proto.RegisterType((*UpdateClusterMetadata)(nil), "google.bigtable.admin.v2.UpdateClusterMetadata")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for BigtableInstanceAdmin service
type BigtableInstanceAdminClient interface {
// Creates an instance within a project.
CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
// Gets information about an instance.
GetInstance(ctx context.Context, in *GetInstanceRequest, opts ...grpc.CallOption) (*Instance, error)
// Lists information about instances in a project.
ListInstances(ctx context.Context, in *ListInstancesRequest, opts ...grpc.CallOption) (*ListInstancesResponse, error)
// Updates an instance within a project.
UpdateInstance(ctx context.Context, in *Instance, opts ...grpc.CallOption) (*Instance, error)
// Deletes an instance from a project.
DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error)
// Creates a cluster within an instance.
CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
// Gets information about a cluster.
GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error)
// Lists information about clusters in an instance.
ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error)
// Updates a cluster within an instance.
UpdateCluster(ctx context.Context, in *Cluster, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
// Deletes a cluster from an instance.
DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error)
}
type bigtableInstanceAdminClient struct {
cc *grpc.ClientConn
}
func NewBigtableInstanceAdminClient(cc *grpc.ClientConn) BigtableInstanceAdminClient {
return &bigtableInstanceAdminClient{cc}
}
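// exampleGetInstance is an illustrative sketch, not part of the generated API:
// a minimal call through the client constructed above. It assumes conn is an
// authenticated *grpc.ClientConn to the Bigtable admin endpoint and name is of
// the form `projects/<project>/instances/<instance>`.
func exampleGetInstance(ctx context.Context, conn *grpc.ClientConn, name string) (*Instance, error) {
	client := NewBigtableInstanceAdminClient(conn)
	return client.GetInstance(ctx, &GetInstanceRequest{Name: name})
}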
func (c *bigtableInstanceAdminClient) CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) {
out := new(google_longrunning.Operation)
err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *bigtableInstanceAdminClient) GetInstance(ctx context.Context, in *GetInstanceRequest, opts ...grpc.CallOption) (*Instance, error) {
out := new(Instance)
err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *bigtableInstanceAdminClient) ListInstances(ctx context.Context, in *ListInstancesRequest, opts ...grpc.CallOption) (*ListInstancesResponse, error) {
out := new(ListInstancesResponse)
err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *bigtableInstanceAdminClient) UpdateInstance(ctx context.Context, in *Instance, opts ...grpc.CallOption) (*Instance, error) {
out := new(Instance)
err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *bigtableInstanceAdminClient) DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) {
out := new(google_protobuf3.Empty)
err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *bigtableInstanceAdminClient) CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) {
out := new(google_longrunning.Operation)
err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *bigtableInstanceAdminClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) {
out := new(Cluster)
err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *bigtableInstanceAdminClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) {
out := new(ListClustersResponse)
err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *bigtableInstanceAdminClient) UpdateCluster(ctx context.Context, in *Cluster, opts ...grpc.CallOption) (*google_longrunning.Operation, error) {
out := new(google_longrunning.Operation)
err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *bigtableInstanceAdminClient) DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) {
out := new(google_protobuf3.Empty)
err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for BigtableInstanceAdmin service
type BigtableInstanceAdminServer interface {
// Creates an instance within a project.
CreateInstance(context.Context, *CreateInstanceRequest) (*google_longrunning.Operation, error)
// Gets information about an instance.
GetInstance(context.Context, *GetInstanceRequest) (*Instance, error)
// Lists information about instances in a project.
ListInstances(context.Context, *ListInstancesRequest) (*ListInstancesResponse, error)
// Updates an instance within a project.
UpdateInstance(context.Context, *Instance) (*Instance, error)
// Deletes an instance from a project.
DeleteInstance(context.Context, *DeleteInstanceRequest) (*google_protobuf3.Empty, error)
// Creates a cluster within an instance.
CreateCluster(context.Context, *CreateClusterRequest) (*google_longrunning.Operation, error)
// Gets information about a cluster.
GetCluster(context.Context, *GetClusterRequest) (*Cluster, error)
// Lists information about clusters in an instance.
ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error)
// Updates a cluster within an instance.
UpdateCluster(context.Context, *Cluster) (*google_longrunning.Operation, error)
// Deletes a cluster from an instance.
DeleteCluster(context.Context, *DeleteClusterRequest) (*google_protobuf3.Empty, error)
}
func RegisterBigtableInstanceAdminServer(s *grpc.Server, srv BigtableInstanceAdminServer) {
s.RegisterService(&_BigtableInstanceAdmin_serviceDesc, srv)
}
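// exampleNewInstanceAdminServer is an illustrative sketch, not part of the
// generated API: it wires an implementation of BigtableInstanceAdminServer
// into a gRPC server. impl is an assumed value whose concrete type implements
// every method of the interface; the caller is responsible for serving the
// returned *grpc.Server on a listener.
func exampleNewInstanceAdminServer(impl BigtableInstanceAdminServer) *grpc.Server {
	s := grpc.NewServer()
	RegisterBigtableInstanceAdminServer(s, impl)
	return s
}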
func _BigtableInstanceAdmin_CreateInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateInstanceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BigtableInstanceAdminServer).CreateInstance(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BigtableInstanceAdminServer).CreateInstance(ctx, req.(*CreateInstanceRequest))
}
return interceptor(ctx, in, info, handler)
}
func _BigtableInstanceAdmin_GetInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetInstanceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BigtableInstanceAdminServer).GetInstance(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BigtableInstanceAdminServer).GetInstance(ctx, req.(*GetInstanceRequest))
}
return interceptor(ctx, in, info, handler)
}
func _BigtableInstanceAdmin_ListInstances_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListInstancesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BigtableInstanceAdminServer).ListInstances(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BigtableInstanceAdminServer).ListInstances(ctx, req.(*ListInstancesRequest))
}
return interceptor(ctx, in, info, handler)
}
func _BigtableInstanceAdmin_UpdateInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Instance)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BigtableInstanceAdminServer).UpdateInstance(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BigtableInstanceAdminServer).UpdateInstance(ctx, req.(*Instance))
}
return interceptor(ctx, in, info, handler)
}
func _BigtableInstanceAdmin_DeleteInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteInstanceRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BigtableInstanceAdminServer).DeleteInstance(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BigtableInstanceAdminServer).DeleteInstance(ctx, req.(*DeleteInstanceRequest))
}
return interceptor(ctx, in, info, handler)
}
func _BigtableInstanceAdmin_CreateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateClusterRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BigtableInstanceAdminServer).CreateCluster(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BigtableInstanceAdminServer).CreateCluster(ctx, req.(*CreateClusterRequest))
}
return interceptor(ctx, in, info, handler)
}
func _BigtableInstanceAdmin_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetClusterRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BigtableInstanceAdminServer).GetCluster(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BigtableInstanceAdminServer).GetCluster(ctx, req.(*GetClusterRequest))
}
return interceptor(ctx, in, info, handler)
}
func _BigtableInstanceAdmin_ListClusters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListClustersRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BigtableInstanceAdminServer).ListClusters(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BigtableInstanceAdminServer).ListClusters(ctx, req.(*ListClustersRequest))
}
return interceptor(ctx, in, info, handler)
}
func _BigtableInstanceAdmin_UpdateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Cluster)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BigtableInstanceAdminServer).UpdateCluster(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BigtableInstanceAdminServer).UpdateCluster(ctx, req.(*Cluster))
}
return interceptor(ctx, in, info, handler)
}
func _BigtableInstanceAdmin_DeleteCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteClusterRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BigtableInstanceAdminServer).DeleteCluster(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BigtableInstanceAdminServer).DeleteCluster(ctx, req.(*DeleteClusterRequest))
}
return interceptor(ctx, in, info, handler)
}
var _BigtableInstanceAdmin_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.bigtable.admin.v2.BigtableInstanceAdmin",
HandlerType: (*BigtableInstanceAdminServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "CreateInstance",
Handler: _BigtableInstanceAdmin_CreateInstance_Handler,
},
{
MethodName: "GetInstance",
Handler: _BigtableInstanceAdmin_GetInstance_Handler,
},
{
MethodName: "ListInstances",
Handler: _BigtableInstanceAdmin_ListInstances_Handler,
},
{
MethodName: "UpdateInstance",
Handler: _BigtableInstanceAdmin_UpdateInstance_Handler,
},
{
MethodName: "DeleteInstance",
Handler: _BigtableInstanceAdmin_DeleteInstance_Handler,
},
{
MethodName: "CreateCluster",
Handler: _BigtableInstanceAdmin_CreateCluster_Handler,
},
{
MethodName: "GetCluster",
Handler: _BigtableInstanceAdmin_GetCluster_Handler,
},
{
MethodName: "ListClusters",
Handler: _BigtableInstanceAdmin_ListClusters_Handler,
},
{
MethodName: "UpdateCluster",
Handler: _BigtableInstanceAdmin_UpdateCluster_Handler,
},
{
MethodName: "DeleteCluster",
Handler: _BigtableInstanceAdmin_DeleteCluster_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/bigtable/admin/v2/bigtable_instance_admin.proto",
}
func init() {
proto.RegisterFile("google/bigtable/admin/v2/bigtable_instance_admin.proto", fileDescriptor0)
}
var fileDescriptor0 = []byte{
// 985 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0x41, 0x8f, 0xdb, 0x44,
0x14, 0xd6, 0x24, 0xa5, 0x34, 0xcf, 0xcd, 0xee, 0x32, 0x6c, 0x56, 0x91, 0x69, 0xd5, 0xad, 0x2b,
0xb5, 0x69, 0xba, 0xd8, 0x22, 0x20, 0x16, 0xed, 0x2a, 0x08, 0xb6, 0x54, 0xd5, 0x4a, 0x5b, 0xb1,
0x8a, 0xca, 0x81, 0x1e, 0x88, 0x66, 0x93, 0x59, 0x63, 0xea, 0x8c, 0x8d, 0x3d, 0x59, 0xb1, 0xaa,
0x7a, 0x41, 0x88, 0x43, 0x25, 0x38, 0xc0, 0x11, 0x71, 0xe2, 0xc2, 0x81, 0x7f, 0xc2, 0x91, 0x23,
0x27, 0x24, 0x7e, 0x00, 0x3f, 0x01, 0x8d, 0x67, 0xc6, 0x89, 0xb3, 0x76, 0xec, 0x08, 0x21, 0xf5,
0x66, 0xcf, 0xbc, 0xf7, 0xe6, 0x9b, 0xef, 0xfb, 0xfc, 0x5e, 0x02, 0xef, 0xba, 0x41, 0xe0, 0xfa,
0xd4, 0x39, 0xf1, 0x5c, 0x4e, 0x4e, 0x7c, 0xea, 0x90, 0xf1, 0xc4, 0x63, 0xce, 0x59, 0x2f, 0x5d,
0x19, 0x7a, 0x2c, 0xe6, 0x84, 0x8d, 0xe8, 0x30, 0xd9, 0xb2, 0xc3, 0x28, 0xe0, 0x01, 0x6e, 0xcb,
0x3c, 0x5b, 0x47, 0xd9, 0x72, 0xf3, 0xac, 0x67, 0x5e, 0x53, 0x15, 0x49, 0xe8, 0x39, 0x84, 0xb1,
0x80, 0x13, 0xee, 0x05, 0x2c, 0x96, 0x79, 0xe6, 0x9d, 0xc2, 0xf3, 0xf4, 0x31, 0x2a, 0xf0, 0x96,
0x0a, 0xf4, 0x03, 0xe6, 0x46, 0x53, 0xc6, 0x3c, 0xe6, 0x3a, 0x41, 0x48, 0xa3, 0x4c, 0xb5, 0x37,
0x54, 0x50, 0xf2, 0x76, 0x32, 0x3d, 0x75, 0xe8, 0x24, 0xe4, 0xe7, 0x6a, 0xf3, 0xc6, 0xe2, 0x26,
0xf7, 0x26, 0x34, 0xe6, 0x64, 0x12, 0xca, 0x00, 0xeb, 0xf7, 0x1a, 0xb4, 0xee, 0x47, 0x94, 0x70,
0x7a, 0xa8, 0xce, 0x1e, 0xd0, 0x2f, 0xa7, 0x34, 0xe6, 0x78, 0x0b, 0x2e, 0x87, 0x24, 0xa2, 0x8c,
0xb7, 0xd1, 0x36, 0xea, 0x34, 0x06, 0xea, 0x0d, 0xdf, 0x00, 0x23, 0x65, 0xc3, 0x1b, 0xb7, 0x6b,
0xc9, 0x26, 0xe8, 0xa5, 0xc3, 0x31, 0x7e, 0x1f, 0xae, 0xe8, 0xb7, 0x76, 0x7d, 0x1b, 0x75, 0x8c,
0x9e, 0x65, 0x17, 0x31, 0x65, 0xa7, 0xa7, 0xa6, 0x39, 0xf8, 0x53, 0xb8, 0x32, 0xf2, 0xa7, 0x31,
0xa7, 0x51, 0xdc, 0xbe, 0xb4, 0x5d, 0xef, 0x18, 0xbd, 0x7e, 0x71, 0x7e, 0x2e, 0x76, 0xfb, 0xbe,
0xca, 0x7f, 0xc0, 0x78, 0x74, 0x3e, 0x48, 0xcb, 0x99, 0x9f, 0x41, 0x33, 0xb3, 0x85, 0x37, 0xa0,
0xfe, 0x94, 0x9e, 0xab, 0x1b, 0x8a, 0x47, 0xbc, 0x0b, 0xaf, 0x9c, 0x11, 0x7f, 0x4a, 0x93, 0x8b,
0x19, 0xbd, 0x9b, 0x4b, 0x8e, 0x96, 0x95, 0x06, 0x32, 0x7e, 0xaf, 0xf6, 0x1e, 0xb2, 0x3a, 0x80,
0x1f, 0x52, 0xbe, 0xc8, 0x24, 0x86, 0x4b, 0x8c, 0x4c, 0xa8, 0x3a, 0x25, 0x79, 0xb6, 0x1e, 0xc1,
0xe6, 0x91, 0x17, 0xa7, 0xa1, 0x71, 0x19, 0xeb, 0xd7, 0x01, 0x42, 0xe2, 0xd2, 0x21, 0x0f, 0x9e,
0x52, 0xa6, 0x48, 0x6f, 0x88, 0x95, 0xc7, 0x62, 0xc1, 0xfa, 0x0d, 0x41, 0x6b, 0xa1, 0x5e, 0x1c,
0x06, 0x2c, 0xa6, 0xf8, 0x03, 0x68, 0x68, 0x66, 0xe3, 0x36, 0x4a, 0xe8, 0xac, 0x22, 0xc7, 0x2c,
0x09, 0xdf, 0x85, 0x8d, 0x53, 0xe2, 0xf9, 0x74, 0x3c, 0xf4, 0x83, 0x91, 0xb4, 0x5e, 0xbb, 0xb6,
0x5d, 0xef, 0x34, 0x06, 0xeb, 0x72, 0xfd, 0x48, 0x2f, 0xe3, 0xdb, 0xb0, 0xce, 0xe8, 0x57, 0x7c,
0x38, 0x07, 0xb5, 0x9e, 0x40, 0x6d, 0x8a, 0xe5, 0xe3, 0x14, 0xee, 0x3d, 0x68, 0x7d, 0x44, 0x7d,
0x7a, 0xd1, 0x74, 0x79, 0x54, 0xbd, 0x40, 0xb0, 0x29, 0x65, 0xd6, 0x8c, 0x97, 0x73, 0xa5, 0x14,
0x9f, 0x19, 0xb4, 0xa1, 0x56, 0x0e, 0xc7, 0x78, 0x1f, 0x5e, 0x55, 0x2f, 0xca, 0x9e, 0x15, 0x34,
0xd6, 0x19, 0xd6, 0x1d, 0x78, 0xed, 0x21, 0xe5, 0x0b, 0x40, 0xf2, 0x50, 0x1f, 0xc1, 0xeb, 0x42,
0x10, 0x6d, 0xb7, 0xff, 0xa8, 0xef, 0xaf, 0x48, 0xfa, 0x65, 0x56, 0x4e, 0xc9, 0xdb, 0x9f, 0xfb,
0x58, 0xa4, 0xba, 0x15, 0x6e, 0x93, 0xa6, 0xfc, 0x1f, 0xda, 0x76, 0x61, 0x53, 0x6a, 0x5b, 0x81,
0xa4, 0x7f, 0x10, 0x6c, 0x65, 0xbf, 0xe0, 0x47, 0x94, 0x93, 0x31, 0xe1, 0x04, 0x3f, 0x81, 0x8d,
0x20, 0xf2, 0x5c, 0x8f, 0x11, 0x7f, 0x18, 0xc9, 0x12, 0x49, 0xaa, 0xd1, 0x73, 0x56, 0xec, 0x06,
0x83, 0x75, 0x5d, 0x48, 0x43, 0xe9, 0xc3, 0x55, 0x55, 0x72, 0x28, 0xfa, 0xa1, 0xfa, 0xd4, 0x4d,
0x5d, 0x57, 0x37, 0x4b, 0xfb, 0xb1, 0x6e, 0x96, 0x03, 0x43, 0xc5, 0x8b, 0x15, 0xbc, 0x0f, 0xc6,
0xa9, 0xc7, 0xbc, 0xf8, 0x73, 0x99, 0x5d, 0x2f, 0xcd, 0x06, 0x19, 0x2e, 0x16, 0xac, 0xbf, 0x10,
0xb4, 0x3e, 0x09, 0xc7, 0x33, 0x37, 0xa7, 0x37, 0x3e, 0x2a, 0xbc, 0x71, 0x05, 0x49, 0x5f, 0xa6,
0x3b, 0xf6, 0xfe, 0x34, 0xa0, 0x75, 0xa0, 0xa0, 0x6a, 0x31, 0x3e, 0x14, 0x88, 0xf1, 0xf7, 0x08,
0xd6, 0xb2, 0x22, 0xe1, 0x55, 0xe5, 0x34, 0xaf, 0xeb, 0x84, 0xb9, 0xb1, 0x68, 0x7f, 0xac, 0xc7,
0xa2, 0xb5, 0xf3, 0xf5, 0x1f, 0x7f, 0xff, 0x58, 0xbb, 0x6d, 0xdd, 0x14, 0x03, 0xf5, 0x99, 0xfc,
0xbc, 0xfa, 0x61, 0x14, 0x7c, 0x41, 0x47, 0x3c, 0x76, 0xba, 0xcf, 0xd3, 0x21, 0x1b, 0xef, 0xa1,
0x2e, 0x7e, 0x81, 0xc0, 0x98, 0x6b, 0xd9, 0x78, 0xa7, 0x18, 0xcd, 0xc5, 0xce, 0x6e, 0x56, 0xe8,
0xa4, 0xd6, 0xdd, 0x04, 0xcf, 0x2d, 0x2c, 0xf1, 0x08, 0xdb, 0xcf, 0xa1, 0x99, 0x81, 0x71, 0xba,
0xcf, 0xf1, 0x4f, 0x08, 0x9a, 0x99, 0x2e, 0x8e, 0xed, 0xe2, 0x03, 0xf2, 0xc6, 0x87, 0xe9, 0x54,
0x8e, 0x97, 0xfd, 0x63, 0x01, 0xdd, 0x32, 0xb6, 0xf0, 0xb7, 0x08, 0xd6, 0xa4, 0x73, 0x53, 0xb6,
0x2a, 0xdc, 0xbf, 0x12, 0x47, 0x4a, 0x33, 0xb3, 0x9c, 0x23, 0xa1, 0xd9, 0x37, 0x08, 0xd6, 0xb2,
0xe3, 0x63, 0x99, 0x89, 0x72, 0x07, 0x8d, 0xb9, 0x75, 0xc1, 0xca, 0x0f, 0xc4, 0xcf, 0x26, 0xcd,
0x47, 0xb7, 0x82, 0x5a, 0x3f, 0x23, 0x68, 0x66, 0xe6, 0xd2, 0x32, 0xb5, 0xf2, 0x06, 0x58, 0x99,
0x93, 0xfb, 0x09, 0x96, 0x5d, 0x6b, 0x27, 0x5f, 0x9b, 0x0c, 0x1a, 0x47, 0xb7, 0xf4, 0x3d, 0x3d,
0xaa, 0xf0, 0x0f, 0x08, 0x60, 0x36, 0xab, 0xf0, 0xbd, 0xa5, 0xce, 0x5e, 0x40, 0x56, 0xde, 0x71,
0xac, 0x77, 0x12, 0x74, 0x36, 0xde, 0x29, 0x63, 0x2a, 0x85, 0x26, 0x48, 0xfb, 0x05, 0xc1, 0xd5,
0xf9, 0x41, 0x86, 0xdf, 0x5c, 0xee, 0xd8, 0x85, 0xf9, 0x69, 0xda, 0x55, 0xc3, 0x95, 0xbf, 0xb3,
0x28, 0x2b, 0x72, 0x28, 0xba, 0x42, 0x33, 0xd3, 0xa4, 0x71, 0x39, 0x21, 0x65, 0x6a, 0xee, 0x26,
0x48, 0xde, 0x32, 0x57, 0xe2, 0x4b, 0xd8, 0xfd, 0x3b, 0x04, 0xcd, 0xcc, 0x44, 0x5d, 0xe6, 0xb3,
0xbc, 0xd1, 0x5b, 0x68, 0x76, 0x45, 0x4e, 0x77, 0x25, 0x48, 0x07, 0xcf, 0xe0, 0xda, 0x28, 0x98,
0x14, 0x42, 0x38, 0x30, 0x73, 0x5b, 0xff, 0xb1, 0x38, 0xfa, 0x18, 0x3d, 0xe9, 0xab, 0x3c, 0x37,
0xf0, 0x09, 0x73, 0xed, 0x20, 0x72, 0x1d, 0x97, 0xb2, 0x04, 0x98, 0x23, 0xb7, 0x48, 0xe8, 0xc5,
0x17, 0xff, 0x1b, 0xed, 0x27, 0x0f, 0x27, 0x97, 0x93, 0xc8, 0xb7, 0xff, 0x0d, 0x00, 0x00, 0xff,
0xff, 0xb8, 0x4b, 0x18, 0x4a, 0xb4, 0x0d, 0x00, 0x00,
}

View File

@ -0,0 +1,896 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/bigtable/admin/v2/bigtable_table_admin.proto
package admin
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import _ "google.golang.org/genproto/googleapis/api/serviceconfig"
import _ "google.golang.org/genproto/googleapis/longrunning"
import _ "github.com/golang/protobuf/ptypes/duration"
import google_protobuf3 "github.com/golang/protobuf/ptypes/empty"
import _ "github.com/golang/protobuf/ptypes/timestamp"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Request message for
// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable]
type CreateTableRequest struct {
// The unique name of the instance in which to create the table.
// Values are of the form `projects/<project>/instances/<instance>`.
Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"`
// The name by which the new table should be referred to within the parent
// instance, e.g., `foobar` rather than `<parent>/tables/foobar`.
TableId string `protobuf:"bytes,2,opt,name=table_id,json=tableId" json:"table_id,omitempty"`
// The Table to create.
Table *Table `protobuf:"bytes,3,opt,name=table" json:"table,omitempty"`
// The optional list of row keys that will be used to initially split the
// table into several tablets (tablets are similar to HBase regions).
// Given two split keys, `s1` and `s2`, three tablets will be created,
// spanning the key ranges: `[, s1), [s1, s2), [s2, )`.
//
// Example:
//
// * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",`
// `"other", "zz"]`
// * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]`
// * Key assignment:
// - Tablet 1 `[, apple) => {"a"}.`
// - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.`
// - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.`
// - Tablet 4 `[customer_2, other) => {"customer_2"}.`
// - Tablet 5 `[other, ) => {"other", "zz"}.`
InitialSplits []*CreateTableRequest_Split `protobuf:"bytes,4,rep,name=initial_splits,json=initialSplits" json:"initial_splits,omitempty"`
}
func (m *CreateTableRequest) Reset() { *m = CreateTableRequest{} }
func (m *CreateTableRequest) String() string { return proto.CompactTextString(m) }
func (*CreateTableRequest) ProtoMessage() {}
func (*CreateTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
func (m *CreateTableRequest) GetParent() string {
if m != nil {
return m.Parent
}
return ""
}
func (m *CreateTableRequest) GetTableId() string {
if m != nil {
return m.TableId
}
return ""
}
func (m *CreateTableRequest) GetTable() *Table {
if m != nil {
return m.Table
}
return nil
}
func (m *CreateTableRequest) GetInitialSplits() []*CreateTableRequest_Split {
if m != nil {
return m.InitialSplits
}
return nil
}
// An initial split point for a newly created table.
type CreateTableRequest_Split struct {
// Row key to use as an initial tablet boundary.
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
}
func (m *CreateTableRequest_Split) Reset() { *m = CreateTableRequest_Split{} }
func (m *CreateTableRequest_Split) String() string { return proto.CompactTextString(m) }
func (*CreateTableRequest_Split) ProtoMessage() {}
func (*CreateTableRequest_Split) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0, 0} }
func (m *CreateTableRequest_Split) GetKey() []byte {
if m != nil {
return m.Key
}
return nil
}
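// exampleCreateTableRequest is an illustrative sketch, not part of the
// generated API: it builds a request matching the pre-split example in the
// CreateTableRequest comment above. The parent and table ID are placeholders,
// and the Table itself is left with default settings.
func exampleCreateTableRequest() *CreateTableRequest {
	return &CreateTableRequest{
		Parent:  "projects/<project>/instances/<instance>",
		TableId: "foobar",
		Table:   &Table{},
		InitialSplits: []*CreateTableRequest_Split{
			{Key: []byte("apple")},
			{Key: []byte("customer_1")},
			{Key: []byte("customer_2")},
			{Key: []byte("other")},
		},
	}
}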
// Request message for
// [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange]
type DropRowRangeRequest struct {
// The unique name of the table on which to drop a range of rows.
// Values are of the form
// `projects/<project>/instances/<instance>/tables/<table>`.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// Delete either all rows in the table, or only those with a given row key prefix.
//
// Types that are valid to be assigned to Target:
// *DropRowRangeRequest_RowKeyPrefix
// *DropRowRangeRequest_DeleteAllDataFromTable
Target isDropRowRangeRequest_Target `protobuf_oneof:"target"`
}
func (m *DropRowRangeRequest) Reset() { *m = DropRowRangeRequest{} }
func (m *DropRowRangeRequest) String() string { return proto.CompactTextString(m) }
func (*DropRowRangeRequest) ProtoMessage() {}
func (*DropRowRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
type isDropRowRangeRequest_Target interface {
isDropRowRangeRequest_Target()
}
type DropRowRangeRequest_RowKeyPrefix struct {
RowKeyPrefix []byte `protobuf:"bytes,2,opt,name=row_key_prefix,json=rowKeyPrefix,proto3,oneof"`
}
type DropRowRangeRequest_DeleteAllDataFromTable struct {
DeleteAllDataFromTable bool `protobuf:"varint,3,opt,name=delete_all_data_from_table,json=deleteAllDataFromTable,oneof"`
}
func (*DropRowRangeRequest_RowKeyPrefix) isDropRowRangeRequest_Target() {}
func (*DropRowRangeRequest_DeleteAllDataFromTable) isDropRowRangeRequest_Target() {}
func (m *DropRowRangeRequest) GetTarget() isDropRowRangeRequest_Target {
if m != nil {
return m.Target
}
return nil
}
func (m *DropRowRangeRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *DropRowRangeRequest) GetRowKeyPrefix() []byte {
if x, ok := m.GetTarget().(*DropRowRangeRequest_RowKeyPrefix); ok {
return x.RowKeyPrefix
}
return nil
}
func (m *DropRowRangeRequest) GetDeleteAllDataFromTable() bool {
if x, ok := m.GetTarget().(*DropRowRangeRequest_DeleteAllDataFromTable); ok {
return x.DeleteAllDataFromTable
}
return false
}
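// exampleDropRowRangeRequests is an illustrative sketch, not part of the
// generated API: the Target oneof is set by wrapping exactly one of the two
// options below. The table name and row key prefix are placeholders.
func exampleDropRowRangeRequests(table string) (byPrefix, dropAll *DropRowRangeRequest) {
	byPrefix = &DropRowRangeRequest{
		Name:   table,
		Target: &DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("customer_1#")},
	}
	dropAll = &DropRowRangeRequest{
		Name:   table,
		Target: &DropRowRangeRequest_DeleteAllDataFromTable{DeleteAllDataFromTable: true},
	}
	return byPrefix, dropAll
}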
// XXX_OneofFuncs is for the internal use of the proto package.
func (*DropRowRangeRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _DropRowRangeRequest_OneofMarshaler, _DropRowRangeRequest_OneofUnmarshaler, _DropRowRangeRequest_OneofSizer, []interface{}{
(*DropRowRangeRequest_RowKeyPrefix)(nil),
(*DropRowRangeRequest_DeleteAllDataFromTable)(nil),
}
}
func _DropRowRangeRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*DropRowRangeRequest)
// target
switch x := m.Target.(type) {
case *DropRowRangeRequest_RowKeyPrefix:
b.EncodeVarint(2<<3 | proto.WireBytes)
b.EncodeRawBytes(x.RowKeyPrefix)
case *DropRowRangeRequest_DeleteAllDataFromTable:
t := uint64(0)
if x.DeleteAllDataFromTable {
t = 1
}
b.EncodeVarint(3<<3 | proto.WireVarint)
b.EncodeVarint(t)
case nil:
default:
return fmt.Errorf("DropRowRangeRequest.Target has unexpected type %T", x)
}
return nil
}
func _DropRowRangeRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*DropRowRangeRequest)
switch tag {
case 2: // target.row_key_prefix
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeRawBytes(true)
m.Target = &DropRowRangeRequest_RowKeyPrefix{x}
return true, err
case 3: // target.delete_all_data_from_table
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Target = &DropRowRangeRequest_DeleteAllDataFromTable{x != 0}
return true, err
default:
return false, nil
}
}
func _DropRowRangeRequest_OneofSizer(msg proto.Message) (n int) {
m := msg.(*DropRowRangeRequest)
// target
switch x := m.Target.(type) {
case *DropRowRangeRequest_RowKeyPrefix:
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.RowKeyPrefix)))
n += len(x.RowKeyPrefix)
case *DropRowRangeRequest_DeleteAllDataFromTable:
n += proto.SizeVarint(3<<3 | proto.WireVarint)
n += 1
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
// Request message for
// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables]
type ListTablesRequest struct {
// The unique name of the instance for which tables should be listed.
// Values are of the form `projects/<project>/instances/<instance>`.
Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"`
// The view to be applied to the returned tables' fields.
// Defaults to `NAME_ONLY` if unspecified; no others are currently supported.
View Table_View `protobuf:"varint,2,opt,name=view,enum=google.bigtable.admin.v2.Table_View" json:"view,omitempty"`
// The value of `next_page_token` returned by a previous call.
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
}
func (m *ListTablesRequest) Reset() { *m = ListTablesRequest{} }
func (m *ListTablesRequest) String() string { return proto.CompactTextString(m) }
func (*ListTablesRequest) ProtoMessage() {}
func (*ListTablesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
func (m *ListTablesRequest) GetParent() string {
if m != nil {
return m.Parent
}
return ""
}
func (m *ListTablesRequest) GetView() Table_View {
if m != nil {
return m.View
}
return Table_VIEW_UNSPECIFIED
}
func (m *ListTablesRequest) GetPageToken() string {
if m != nil {
return m.PageToken
}
return ""
}
// Response message for
// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables]
type ListTablesResponse struct {
// The tables present in the requested instance.
Tables []*Table `protobuf:"bytes,1,rep,name=tables" json:"tables,omitempty"`
// Set if not all tables could be returned in a single response.
// Pass this value to `page_token` in another request to get the next
// page of results.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
}
func (m *ListTablesResponse) Reset() { *m = ListTablesResponse{} }
func (m *ListTablesResponse) String() string { return proto.CompactTextString(m) }
func (*ListTablesResponse) ProtoMessage() {}
func (*ListTablesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} }
func (m *ListTablesResponse) GetTables() []*Table {
if m != nil {
return m.Tables
}
return nil
}
func (m *ListTablesResponse) GetNextPageToken() string {
if m != nil {
return m.NextPageToken
}
return ""
}
// Request message for
// [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable]
type GetTableRequest struct {
// The unique name of the requested table.
// Values are of the form
// `projects/<project>/instances/<instance>/tables/<table>`.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// The view to be applied to the returned table's fields.
// Defaults to `SCHEMA_ONLY` if unspecified.
View Table_View `protobuf:"varint,2,opt,name=view,enum=google.bigtable.admin.v2.Table_View" json:"view,omitempty"`
}
func (m *GetTableRequest) Reset() { *m = GetTableRequest{} }
func (m *GetTableRequest) String() string { return proto.CompactTextString(m) }
func (*GetTableRequest) ProtoMessage() {}
func (*GetTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} }
func (m *GetTableRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *GetTableRequest) GetView() Table_View {
if m != nil {
return m.View
}
return Table_VIEW_UNSPECIFIED
}
// Request message for
// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable]
type DeleteTableRequest struct {
// The unique name of the table to be deleted.
// Values are of the form
// `projects/<project>/instances/<instance>/tables/<table>`.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}
func (m *DeleteTableRequest) Reset() { *m = DeleteTableRequest{} }
func (m *DeleteTableRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteTableRequest) ProtoMessage() {}
func (*DeleteTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} }
func (m *DeleteTableRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
// Request message for
// [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies]
type ModifyColumnFamiliesRequest struct {
// The unique name of the table whose families should be modified.
// Values are of the form
// `projects/<project>/instances/<instance>/tables/<table>`.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// Modifications to be atomically applied to the specified table's families.
// Entries are applied in order, meaning that earlier modifications can be
// masked by later ones (in the case of repeated updates to the same family,
// for example).
Modifications []*ModifyColumnFamiliesRequest_Modification `protobuf:"bytes,2,rep,name=modifications" json:"modifications,omitempty"`
}
func (m *ModifyColumnFamiliesRequest) Reset() { *m = ModifyColumnFamiliesRequest{} }
func (m *ModifyColumnFamiliesRequest) String() string { return proto.CompactTextString(m) }
func (*ModifyColumnFamiliesRequest) ProtoMessage() {}
func (*ModifyColumnFamiliesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} }
func (m *ModifyColumnFamiliesRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *ModifyColumnFamiliesRequest) GetModifications() []*ModifyColumnFamiliesRequest_Modification {
if m != nil {
return m.Modifications
}
return nil
}
// A create, update, or delete of a particular column family.
type ModifyColumnFamiliesRequest_Modification struct {
// The ID of the column family to be modified.
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
// Column family modifications.
//
// Types that are valid to be assigned to Mod:
// *ModifyColumnFamiliesRequest_Modification_Create
// *ModifyColumnFamiliesRequest_Modification_Update
// *ModifyColumnFamiliesRequest_Modification_Drop
Mod isModifyColumnFamiliesRequest_Modification_Mod `protobuf_oneof:"mod"`
}
func (m *ModifyColumnFamiliesRequest_Modification) Reset() {
*m = ModifyColumnFamiliesRequest_Modification{}
}
func (m *ModifyColumnFamiliesRequest_Modification) String() string { return proto.CompactTextString(m) }
func (*ModifyColumnFamiliesRequest_Modification) ProtoMessage() {}
func (*ModifyColumnFamiliesRequest_Modification) Descriptor() ([]byte, []int) {
return fileDescriptor1, []int{6, 0}
}
type isModifyColumnFamiliesRequest_Modification_Mod interface {
isModifyColumnFamiliesRequest_Modification_Mod()
}
type ModifyColumnFamiliesRequest_Modification_Create struct {
Create *ColumnFamily `protobuf:"bytes,2,opt,name=create,oneof"`
}
type ModifyColumnFamiliesRequest_Modification_Update struct {
Update *ColumnFamily `protobuf:"bytes,3,opt,name=update,oneof"`
}
type ModifyColumnFamiliesRequest_Modification_Drop struct {
Drop bool `protobuf:"varint,4,opt,name=drop,oneof"`
}
func (*ModifyColumnFamiliesRequest_Modification_Create) isModifyColumnFamiliesRequest_Modification_Mod() {
}
func (*ModifyColumnFamiliesRequest_Modification_Update) isModifyColumnFamiliesRequest_Modification_Mod() {
}
func (*ModifyColumnFamiliesRequest_Modification_Drop) isModifyColumnFamiliesRequest_Modification_Mod() {
}
func (m *ModifyColumnFamiliesRequest_Modification) GetMod() isModifyColumnFamiliesRequest_Modification_Mod {
if m != nil {
return m.Mod
}
return nil
}
func (m *ModifyColumnFamiliesRequest_Modification) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *ModifyColumnFamiliesRequest_Modification) GetCreate() *ColumnFamily {
if x, ok := m.GetMod().(*ModifyColumnFamiliesRequest_Modification_Create); ok {
return x.Create
}
return nil
}
func (m *ModifyColumnFamiliesRequest_Modification) GetUpdate() *ColumnFamily {
if x, ok := m.GetMod().(*ModifyColumnFamiliesRequest_Modification_Update); ok {
return x.Update
}
return nil
}
func (m *ModifyColumnFamiliesRequest_Modification) GetDrop() bool {
if x, ok := m.GetMod().(*ModifyColumnFamiliesRequest_Modification_Drop); ok {
return x.Drop
}
return false
}
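// exampleModifyColumnFamiliesRequest is an illustrative sketch, not part of
// the generated API: two modifications applied atomically and in order, as
// described on ModifyColumnFamiliesRequest above. The family IDs are
// placeholders and the newly created family uses the default GC policy.
func exampleModifyColumnFamiliesRequest(table string) *ModifyColumnFamiliesRequest {
	return &ModifyColumnFamiliesRequest{
		Name: table,
		Modifications: []*ModifyColumnFamiliesRequest_Modification{
			{Id: "follows", Mod: &ModifyColumnFamiliesRequest_Modification_Create{Create: &ColumnFamily{}}},
			{Id: "obsolete", Mod: &ModifyColumnFamiliesRequest_Modification_Drop{Drop: true}},
		},
	}
}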
// XXX_OneofFuncs is for the internal use of the proto package.
func (*ModifyColumnFamiliesRequest_Modification) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _ModifyColumnFamiliesRequest_Modification_OneofMarshaler, _ModifyColumnFamiliesRequest_Modification_OneofUnmarshaler, _ModifyColumnFamiliesRequest_Modification_OneofSizer, []interface{}{
(*ModifyColumnFamiliesRequest_Modification_Create)(nil),
(*ModifyColumnFamiliesRequest_Modification_Update)(nil),
(*ModifyColumnFamiliesRequest_Modification_Drop)(nil),
}
}
func _ModifyColumnFamiliesRequest_Modification_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*ModifyColumnFamiliesRequest_Modification)
// mod
switch x := m.Mod.(type) {
case *ModifyColumnFamiliesRequest_Modification_Create:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Create); err != nil {
return err
}
case *ModifyColumnFamiliesRequest_Modification_Update:
b.EncodeVarint(3<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Update); err != nil {
return err
}
case *ModifyColumnFamiliesRequest_Modification_Drop:
t := uint64(0)
if x.Drop {
t = 1
}
b.EncodeVarint(4<<3 | proto.WireVarint)
b.EncodeVarint(t)
case nil:
default:
return fmt.Errorf("ModifyColumnFamiliesRequest_Modification.Mod has unexpected type %T", x)
}
return nil
}
func _ModifyColumnFamiliesRequest_Modification_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*ModifyColumnFamiliesRequest_Modification)
switch tag {
case 2: // mod.create
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ColumnFamily)
err := b.DecodeMessage(msg)
m.Mod = &ModifyColumnFamiliesRequest_Modification_Create{msg}
return true, err
case 3: // mod.update
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ColumnFamily)
err := b.DecodeMessage(msg)
m.Mod = &ModifyColumnFamiliesRequest_Modification_Update{msg}
return true, err
case 4: // mod.drop
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Mod = &ModifyColumnFamiliesRequest_Modification_Drop{x != 0}
return true, err
default:
return false, nil
}
}
func _ModifyColumnFamiliesRequest_Modification_OneofSizer(msg proto.Message) (n int) {
m := msg.(*ModifyColumnFamiliesRequest_Modification)
// mod
switch x := m.Mod.(type) {
case *ModifyColumnFamiliesRequest_Modification_Create:
s := proto.Size(x.Create)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *ModifyColumnFamiliesRequest_Modification_Update:
s := proto.Size(x.Update)
n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *ModifyColumnFamiliesRequest_Modification_Drop:
n += proto.SizeVarint(4<<3 | proto.WireVarint)
n += 1
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
func init() {
proto.RegisterType((*CreateTableRequest)(nil), "google.bigtable.admin.v2.CreateTableRequest")
proto.RegisterType((*CreateTableRequest_Split)(nil), "google.bigtable.admin.v2.CreateTableRequest.Split")
proto.RegisterType((*DropRowRangeRequest)(nil), "google.bigtable.admin.v2.DropRowRangeRequest")
proto.RegisterType((*ListTablesRequest)(nil), "google.bigtable.admin.v2.ListTablesRequest")
proto.RegisterType((*ListTablesResponse)(nil), "google.bigtable.admin.v2.ListTablesResponse")
proto.RegisterType((*GetTableRequest)(nil), "google.bigtable.admin.v2.GetTableRequest")
proto.RegisterType((*DeleteTableRequest)(nil), "google.bigtable.admin.v2.DeleteTableRequest")
proto.RegisterType((*ModifyColumnFamiliesRequest)(nil), "google.bigtable.admin.v2.ModifyColumnFamiliesRequest")
proto.RegisterType((*ModifyColumnFamiliesRequest_Modification)(nil), "google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for BigtableTableAdmin service
type BigtableTableAdminClient interface {
// Creates a new table in the specified instance.
// The table can be created with a full set of initial column families,
// specified in the request.
CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*Table, error)
// Lists all tables served from a specified instance.
ListTables(ctx context.Context, in *ListTablesRequest, opts ...grpc.CallOption) (*ListTablesResponse, error)
// Gets metadata about the specified table.
GetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*Table, error)
// Permanently deletes a specified table and all of its data.
DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error)
// Atomically performs a series of column family modifications
// on the specified table.
ModifyColumnFamilies(ctx context.Context, in *ModifyColumnFamiliesRequest, opts ...grpc.CallOption) (*Table, error)
// Permanently drop/delete a row range from a specified table. The request can
// specify whether to delete all rows in a table, or only those that match a
// particular prefix.
DropRowRange(ctx context.Context, in *DropRowRangeRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error)
}
type bigtableTableAdminClient struct {
cc *grpc.ClientConn
}
func NewBigtableTableAdminClient(cc *grpc.ClientConn) BigtableTableAdminClient {
return &bigtableTableAdminClient{cc}
}
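// exampleCreateEmptyTable is an illustrative sketch, not part of the generated
// API: it creates a table with no pre-splits through the client constructed
// above. It assumes conn is an authenticated *grpc.ClientConn to the Bigtable
// admin endpoint; most applications should prefer the higher-level
// cloud.google.com/go/bigtable AdminClient over calling this stub directly.
func exampleCreateEmptyTable(ctx context.Context, conn *grpc.ClientConn, parent, tableID string) (*Table, error) {
	client := NewBigtableTableAdminClient(conn)
	return client.CreateTable(ctx, &CreateTableRequest{
		Parent:  parent,
		TableId: tableID,
		Table:   &Table{},
	})
}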
func (c *bigtableTableAdminClient) CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*Table, error) {
out := new(Table)
err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *bigtableTableAdminClient) ListTables(ctx context.Context, in *ListTablesRequest, opts ...grpc.CallOption) (*ListTablesResponse, error) {
out := new(ListTablesResponse)
err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *bigtableTableAdminClient) GetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*Table, error) {
out := new(Table)
err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *bigtableTableAdminClient) DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) {
out := new(google_protobuf3.Empty)
err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *bigtableTableAdminClient) ModifyColumnFamilies(ctx context.Context, in *ModifyColumnFamiliesRequest, opts ...grpc.CallOption) (*Table, error) {
out := new(Table)
err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *bigtableTableAdminClient) DropRowRange(ctx context.Context, in *DropRowRangeRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) {
out := new(google_protobuf3.Empty)
err := grpc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for BigtableTableAdmin service
type BigtableTableAdminServer interface {
// Creates a new table in the specified instance.
// The table can be created with a full set of initial column families,
// specified in the request.
CreateTable(context.Context, *CreateTableRequest) (*Table, error)
// Lists all tables served from a specified instance.
ListTables(context.Context, *ListTablesRequest) (*ListTablesResponse, error)
// Gets metadata about the specified table.
GetTable(context.Context, *GetTableRequest) (*Table, error)
// Permanently deletes a specified table and all of its data.
DeleteTable(context.Context, *DeleteTableRequest) (*google_protobuf3.Empty, error)
// Atomically performs a series of column family modifications
// on the specified table.
ModifyColumnFamilies(context.Context, *ModifyColumnFamiliesRequest) (*Table, error)
// Permanently drop/delete a row range from a specified table. The request can
// specify whether to delete all rows in a table, or only those that match a
// particular prefix.
DropRowRange(context.Context, *DropRowRangeRequest) (*google_protobuf3.Empty, error)
}
func RegisterBigtableTableAdminServer(s *grpc.Server, srv BigtableTableAdminServer) {
s.RegisterService(&_BigtableTableAdmin_serviceDesc, srv)
}
func _BigtableTableAdmin_CreateTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateTableRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BigtableTableAdminServer).CreateTable(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BigtableTableAdminServer).CreateTable(ctx, req.(*CreateTableRequest))
}
return interceptor(ctx, in, info, handler)
}
func _BigtableTableAdmin_ListTables_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListTablesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BigtableTableAdminServer).ListTables(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BigtableTableAdminServer).ListTables(ctx, req.(*ListTablesRequest))
}
return interceptor(ctx, in, info, handler)
}
func _BigtableTableAdmin_GetTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetTableRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BigtableTableAdminServer).GetTable(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BigtableTableAdminServer).GetTable(ctx, req.(*GetTableRequest))
}
return interceptor(ctx, in, info, handler)
}
func _BigtableTableAdmin_DeleteTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteTableRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BigtableTableAdminServer).DeleteTable(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BigtableTableAdminServer).DeleteTable(ctx, req.(*DeleteTableRequest))
}
return interceptor(ctx, in, info, handler)
}
func _BigtableTableAdmin_ModifyColumnFamilies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ModifyColumnFamiliesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BigtableTableAdminServer).ModifyColumnFamilies(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BigtableTableAdminServer).ModifyColumnFamilies(ctx, req.(*ModifyColumnFamiliesRequest))
}
return interceptor(ctx, in, info, handler)
}
func _BigtableTableAdmin_DropRowRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DropRowRangeRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(BigtableTableAdminServer).DropRowRange(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(BigtableTableAdminServer).DropRowRange(ctx, req.(*DropRowRangeRequest))
}
return interceptor(ctx, in, info, handler)
}
var _BigtableTableAdmin_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.bigtable.admin.v2.BigtableTableAdmin",
HandlerType: (*BigtableTableAdminServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "CreateTable",
Handler: _BigtableTableAdmin_CreateTable_Handler,
},
{
MethodName: "ListTables",
Handler: _BigtableTableAdmin_ListTables_Handler,
},
{
MethodName: "GetTable",
Handler: _BigtableTableAdmin_GetTable_Handler,
},
{
MethodName: "DeleteTable",
Handler: _BigtableTableAdmin_DeleteTable_Handler,
},
{
MethodName: "ModifyColumnFamilies",
Handler: _BigtableTableAdmin_ModifyColumnFamilies_Handler,
},
{
MethodName: "DropRowRange",
Handler: _BigtableTableAdmin_DropRowRange_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/bigtable/admin/v2/bigtable_table_admin.proto",
}
func init() {
proto.RegisterFile("google/bigtable/admin/v2/bigtable_table_admin.proto", fileDescriptor1)
}
var fileDescriptor1 = []byte{
// 910 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x6e, 0x1b, 0x45,
0x14, 0xce, 0xd8, 0x4e, 0x9a, 0x1c, 0x3b, 0x29, 0x0c, 0x25, 0xb8, 0x6e, 0xa1, 0xd1, 0x52, 0x45,
0xc1, 0x84, 0x5d, 0x69, 0xab, 0xa8, 0x28, 0xb4, 0x82, 0xba, 0xa1, 0x84, 0x3f, 0x29, 0x5a, 0x2a,
0x24, 0xb8, 0x59, 0x4d, 0xbc, 0x93, 0xed, 0x90, 0xdd, 0x99, 0x65, 0x77, 0x1c, 0xc7, 0x42, 0xbd,
0x41, 0x48, 0x48, 0xdc, 0xf6, 0xaa, 0xe2, 0x45, 0x10, 0x37, 0x3c, 0x04, 0xd7, 0xdc, 0xf1, 0x08,
0x3c, 0x00, 0x9a, 0x1f, 0x27, 0x9b, 0x38, 0x1b, 0xd7, 0xb9, 0xb1, 0x66, 0xce, 0xf9, 0xce, 0x39,
0xdf, 0x9c, 0x33, 0xdf, 0x8e, 0xe1, 0x5e, 0x2c, 0x44, 0x9c, 0x50, 0x6f, 0x9f, 0xc5, 0x92, 0xec,
0x27, 0xd4, 0x23, 0x51, 0xca, 0xb8, 0x77, 0xe4, 0x9f, 0x58, 0x42, 0xf3, 0xab, 0xed, 0x6e, 0x96,
0x0b, 0x29, 0x70, 0xdb, 0x04, 0xb9, 0x63, 0x88, 0x6b, 0x9c, 0x47, 0x7e, 0xe7, 0xb6, 0x4d, 0x47,
0x32, 0xe6, 0x11, 0xce, 0x85, 0x24, 0x92, 0x09, 0x5e, 0x98, 0xb8, 0xce, 0x9b, 0x65, 0xef, 0x40,
0x3e, 0xb3, 0xe6, 0xbb, 0x95, 0x1c, 0x4c, 0x76, 0x83, 0x7a, 0xd7, 0xa2, 0x12, 0xc1, 0xe3, 0x7c,
0xc0, 0x39, 0xe3, 0xb1, 0x27, 0x32, 0x9a, 0x9f, 0xa9, 0xf0, 0x8e, 0x05, 0xe9, 0xdd, 0xfe, 0xe0,
0xc0, 0x8b, 0x06, 0x06, 0x60, 0xfd, 0xb7, 0xce, 0xfb, 0x69, 0x9a, 0xc9, 0x91, 0x75, 0xde, 0x39,
0xef, 0x94, 0x2c, 0xa5, 0x85, 0x24, 0x69, 0x66, 0x00, 0xce, 0x7f, 0x08, 0xf0, 0xe3, 0x9c, 0x12,
0x49, 0x9f, 0x2a, 0x62, 0x01, 0xfd, 0x71, 0x40, 0x0b, 0x89, 0x57, 0x61, 0x21, 0x23, 0x39, 0xe5,
0xb2, 0x8d, 0xd6, 0xd0, 0xc6, 0x52, 0x60, 0x77, 0xf8, 0x26, 0x2c, 0x9a, 0xde, 0xb1, 0xa8, 0x5d,
0xd3, 0x9e, 0x6b, 0x7a, 0xff, 0x79, 0x84, 0xb7, 0x60, 0x5e, 0x2f, 0xdb, 0xf5, 0x35, 0xb4, 0xd1,
0xf4, 0xef, 0xb8, 0x55, 0x1d, 0x75, 0x4d, 0x25, 0x83, 0xc6, 0xdf, 0xc1, 0x0a, 0xe3, 0x4c, 0x32,
0x92, 0x84, 0x45, 0x96, 0x30, 0x59, 0xb4, 0x1b, 0x6b, 0xf5, 0x8d, 0xa6, 0xef, 0x57, 0xc7, 0x4f,
0xf2, 0x75, 0xbf, 0x51, 0xa1, 0xc1, 0xb2, 0xcd, 0xa4, 0x77, 0x45, 0xe7, 0x26, 0xcc, 0xeb, 0x15,
0x7e, 0x0d, 0xea, 0x87, 0x74, 0xa4, 0x8f, 0xd2, 0x0a, 0xd4, 0xd2, 0x79, 0x89, 0xe0, 0x8d, 0x9d,
0x5c, 0x64, 0x81, 0x18, 0x06, 0x84, 0xc7, 0x27, 0xe7, 0xc6, 0xd0, 0xe0, 0x24, 0xa5, 0xf6, 0xd4,
0x7a, 0x8d, 0xd7, 0x61, 0x25, 0x17, 0xc3, 0xf0, 0x90, 0x8e, 0xc2, 0x2c, 0xa7, 0x07, 0xec, 0x58,
0x9f, 0xbc, 0xb5, 0x3b, 0x17, 0xb4, 0x72, 0x31, 0xfc, 0x92, 0x8e, 0xf6, 0xb4, 0x15, 0x3f, 0x80,
0x4e, 0x44, 0x13, 0x2a, 0x69, 0x48, 0x92, 0x24, 0x8c, 0x88, 0x24, 0xe1, 0x41, 0x2e, 0xd2, 0xf0,
0xb4, 0x2b, 0x8b, 0xbb, 0x73, 0xc1, 0xaa, 0xc1, 0x3c, 0x4a, 0x92, 0x1d, 0x22, 0xc9, 0x93, 0x5c,
0xa4, 0xfa, 0x20, 0xbd, 0x45, 0x58, 0x90, 0x24, 0x8f, 0xa9, 0x74, 0x7e, 0x41, 0xf0, 0xfa, 0x57,
0xac, 0x90, 0xda, 0x5e, 0x4c, 0x9b, 0xc8, 0x87, 0xd0, 0x38, 0x62, 0x74, 0xa8, 0x39, 0xad, 0xf8,
0x77, 0xa7, 0x74, 0xdd, 0xfd, 0x96, 0xd1, 0x61, 0xa0, 0x23, 0xf0, 0xdb, 0x00, 0x19, 0x89, 0x69,
0x28, 0xc5, 0x21, 0xe5, 0x9a, 0xdf, 0x52, 0xb0, 0xa4, 0x2c, 0x4f, 0x95, 0xc1, 0x19, 0x00, 0x2e,
0xb3, 0x28, 0x32, 0xc1, 0x0b, 0x8a, 0xef, 0x2b, 0x9a, 0xca, 0xd2, 0x46, 0x7a, 0x4c, 0x53, 0xc7,
0x6c, 0xe1, 0x78, 0x1d, 0xae, 0x73, 0x7a, 0x2c, 0xc3, 0x52, 0x49, 0x73, 0x81, 0x96, 0x95, 0x79,
0xef, 0xa4, 0x6c, 0x08, 0xd7, 0x3f, 0xa3, 0xf2, 0xcc, 0x65, 0xbc, 0x68, 0x28, 0x57, 0x3e, 0xb6,
0xb3, 0x01, 0x78, 0x47, 0x8f, 0x60, 0x5a, 0x0d, 0xe7, 0x9f, 0x1a, 0xdc, 0xfa, 0x5a, 0x44, 0xec,
0x60, 0xf4, 0x58, 0x24, 0x83, 0x94, 0x3f, 0x21, 0x29, 0x4b, 0xd8, 0xe9, 0x48, 0x2e, 0xe2, 0xf5,
0x0c, 0x96, 0x53, 0x15, 0xc2, 0xfa, 0x46, 0xc4, 0xed, 0x9a, 0x6e, 0x53, 0xaf, 0x9a, 0xe0, 0x25,
0x15, 0x8c, 0xcf, 0xa6, 0x0a, 0xce, 0x26, 0xee, 0xfc, 0x85, 0xa0, 0x55, 0xf6, 0xe3, 0x15, 0xa8,
0xb1, 0xc8, 0x92, 0xa9, 0xb1, 0x08, 0x7f, 0x02, 0x0b, 0x7d, 0xad, 0x14, 0xdd, 0xa4, 0xa6, 0xbf,
0x7e, 0x89, 0xa2, 0x4e, 0xab, 0x8f, 0x76, 0xe7, 0x02, 0x1b, 0xa7, 0x32, 0x0c, 0xb2, 0x48, 0x65,
0xa8, 0xcf, 0x9a, 0xc1, 0xc4, 0xe1, 0x1b, 0xd0, 0x88, 0x72, 0x91, 0xb5, 0x1b, 0xf6, 0xf6, 0xeb,
0x5d, 0x6f, 0x1e, 0xea, 0xa9, 0x88, 0xfc, 0x3f, 0xae, 0x01, 0xee, 0xd9, 0x4c, 0x7a, 0x18, 0x8f,
0x54, 0x36, 0xfc, 0x02, 0x41, 0xb3, 0x24, 0x71, 0xbc, 0x39, 0xcb, 0x97, 0xa0, 0x33, 0xed, 0x42,
0x3a, 0x5b, 0x3f, 0xff, 0xfd, 0xef, 0x8b, 0x9a, 0xe7, 0x74, 0xd5, 0xd7, 0xf8, 0x27, 0xa3, 0xa2,
0x87, 0x59, 0x2e, 0x7e, 0xa0, 0x7d, 0x59, 0x78, 0x5d, 0x8f, 0xf1, 0x42, 0x12, 0xde, 0xa7, 0x85,
0xd7, 0x7d, 0x6e, 0xbe, 0xd6, 0xc5, 0x36, 0xea, 0xe2, 0xdf, 0x11, 0xc0, 0xa9, 0x1e, 0xf0, 0xfb,
0xd5, 0x65, 0x26, 0xb4, 0xdb, 0xd9, 0x7c, 0x35, 0xb0, 0x91, 0x98, 0xe3, 0x6b, 0x82, 0x9b, 0x78,
0x06, 0x82, 0xf8, 0x37, 0x04, 0x8b, 0x63, 0xd9, 0xe0, 0xf7, 0xaa, 0xcb, 0x9d, 0x93, 0xd6, 0xf4,
0x6e, 0x9d, 0x25, 0xa3, 0xae, 0x78, 0x05, 0x15, 0xcb, 0xc4, 0xeb, 0x3e, 0xc7, 0xbf, 0x22, 0x68,
0x96, 0x24, 0x76, 0xd9, 0x00, 0x27, 0x95, 0xd8, 0x59, 0x1d, 0xa3, 0xc7, 0x6f, 0x96, 0xfb, 0xa9,
0x7a, 0xd0, 0xc6, 0x4c, 0xba, 0xb3, 0x30, 0xf9, 0x13, 0xc1, 0x8d, 0x8b, 0xf4, 0x85, 0xb7, 0xae,
0xa4, 0xc7, 0xe9, 0xed, 0xfa, 0x42, 0x93, 0xdc, 0x71, 0x3e, 0x7e, 0x75, 0x92, 0xdb, 0xe9, 0x05,
0x05, 0xd5, 0x8d, 0x7b, 0x89, 0xa0, 0x55, 0x7e, 0xa3, 0xf0, 0x07, 0x97, 0xf4, 0x71, 0xf2, 0x2d,
0xab, 0x6c, 0x64, 0x4f, 0x73, 0x7c, 0xe0, 0xdc, 0x9f, 0x81, 0x63, 0x54, 0xca, 0xbf, 0x8d, 0xba,
0xbd, 0x63, 0xb8, 0xdd, 0x17, 0x69, 0x25, 0x9f, 0xde, 0x5b, 0x93, 0xba, 0xde, 0x53, 0x2c, 0xf6,
0xd0, 0xf7, 0x0f, 0x6d, 0x50, 0x2c, 0x12, 0xc2, 0x63, 0x57, 0xe4, 0xb1, 0x17, 0x53, 0xae, 0x39,
0x7a, 0xc6, 0x45, 0x32, 0x56, 0x4c, 0xfe, 0x73, 0xfa, 0x48, 0x2f, 0xf6, 0x17, 0x34, 0xf2, 0xde,
0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x5d, 0x18, 0x44, 0xe6, 0x09, 0x00, 0x00,
}
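
The sketch below is illustrative only and is not part of the vendored file: it calls the generated BigtableTableAdmin client registered by the service descriptor above to list tables. The dial target, the missing TLS/OAuth credentials, and the parent resource name are placeholder assumptions.

package main

import (
    "fmt"
    "log"

    "golang.org/x/net/context"
    btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
    "google.golang.org/grpc"
)

func main() {
    // Placeholder endpoint; real use goes to the Bigtable admin endpoint with
    // TLS and OAuth credentials, which are omitted here.
    conn, err := grpc.Dial("localhost:8086", grpc.WithInsecure())
    if err != nil {
        log.Fatalf("dialing: %v", err)
    }
    defer conn.Close()

    client := btapb.NewBigtableTableAdminClient(conn)
    resp, err := client.ListTables(context.Background(), &btapb.ListTablesRequest{
        Parent: "projects/my-project/instances/my-instance", // placeholder parent
    })
    if err != nil {
        log.Fatalf("ListTables: %v", err)
    }
    for _, tbl := range resp.Tables {
        fmt.Println(tbl.Name)
    }
}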


@ -0,0 +1,68 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/bigtable/admin/v2/common.proto
package admin
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import _ "github.com/golang/protobuf/ptypes/timestamp"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Storage media types for persisting Bigtable data.
type StorageType int32
const (
// The user did not specify a storage type.
StorageType_STORAGE_TYPE_UNSPECIFIED StorageType = 0
// Flash (SSD) storage should be used.
StorageType_SSD StorageType = 1
// Magnetic drive (HDD) storage should be used.
StorageType_HDD StorageType = 2
)
var StorageType_name = map[int32]string{
0: "STORAGE_TYPE_UNSPECIFIED",
1: "SSD",
2: "HDD",
}
var StorageType_value = map[string]int32{
"STORAGE_TYPE_UNSPECIFIED": 0,
"SSD": 1,
"HDD": 2,
}
func (x StorageType) String() string {
return proto.EnumName(StorageType_name, int32(x))
}
func (StorageType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{0} }
func init() {
proto.RegisterEnum("google.bigtable.admin.v2.StorageType", StorageType_name, StorageType_value)
}
func init() { proto.RegisterFile("google/bigtable/admin/v2/common.proto", fileDescriptor2) }
var fileDescriptor2 = []byte{
// 234 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x90, 0x31, 0x4b, 0xc4, 0x40,
0x10, 0x85, 0x3d, 0x05, 0x85, 0xbd, 0x26, 0xa4, 0x3a, 0x8e, 0x80, 0x95, 0x8d, 0xc5, 0x0e, 0x9c,
0xa5, 0x5c, 0xe1, 0x5d, 0xa2, 0x5e, 0xa3, 0xc1, 0x8d, 0x85, 0x36, 0xc7, 0xe4, 0x5c, 0x87, 0x85,
0xec, 0xce, 0x92, 0xac, 0x07, 0xfe, 0x7b, 0xc9, 0x6e, 0xac, 0xc4, 0xee, 0x0d, 0xef, 0x9b, 0x99,
0x37, 0x23, 0xae, 0x88, 0x99, 0x3a, 0x0d, 0xad, 0xa1, 0x80, 0x6d, 0xa7, 0x01, 0x3f, 0xac, 0x71,
0x70, 0x5c, 0xc1, 0x81, 0xad, 0x65, 0x27, 0x7d, 0xcf, 0x81, 0xf3, 0x45, 0xc2, 0xe4, 0x2f, 0x26,
0x23, 0x26, 0x8f, 0xab, 0x65, 0x31, 0x0d, 0x40, 0x6f, 0x00, 0x9d, 0xe3, 0x80, 0xc1, 0xb0, 0x1b,
0x52, 0xdf, 0xf2, 0x72, 0x72, 0x63, 0xd5, 0x7e, 0x7d, 0x42, 0x30, 0x56, 0x0f, 0x01, 0xad, 0x4f,
0xc0, 0xf5, 0x5a, 0xcc, 0x55, 0xe0, 0x1e, 0x49, 0x37, 0xdf, 0x5e, 0xe7, 0x85, 0x58, 0xa8, 0xe6,
0xf9, 0xe5, 0xee, 0xa1, 0xda, 0x37, 0x6f, 0x75, 0xb5, 0x7f, 0x7d, 0x52, 0x75, 0xb5, 0xdd, 0xdd,
0xef, 0xaa, 0x32, 0x3b, 0xc9, 0x2f, 0xc4, 0x99, 0x52, 0x65, 0x36, 0x1b, 0xc5, 0x63, 0x59, 0x66,
0xa7, 0x9b, 0x4e, 0x14, 0x07, 0xb6, 0xf2, 0xbf, 0x74, 0x9b, 0xf9, 0x36, 0x5e, 0x51, 0x8f, 0xbb,
0xea, 0xd9, 0xfb, 0x7a, 0x02, 0x89, 0x3b, 0x74, 0x24, 0xb9, 0x27, 0x20, 0xed, 0x62, 0x12, 0x48,
0x16, 0x7a, 0x33, 0xfc, 0x7d, 0xc6, 0x6d, 0x14, 0xed, 0x79, 0x24, 0x6f, 0x7e, 0x02, 0x00, 0x00,
0xff, 0xff, 0x66, 0x13, 0x33, 0x8e, 0x35, 0x01, 0x00, 0x00,
}
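
The short sketch below is illustrative only and is not part of the vendored file: it shows how the generated StorageType enum, its String() method, and the exported name/value maps round-trip between numbers and names.

package main

import (
    "fmt"

    btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
)

func main() {
    st := btapb.StorageType_SSD
    fmt.Println(st.String())                       // "SSD"
    fmt.Println(btapb.StorageType_value["HDD"])    // 2
    fmt.Println(btapb.StorageType_name[int32(st)]) // "SSD"
}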


@ -0,0 +1,276 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/bigtable/admin/v2/instance.proto
package admin
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Possible states of an instance.
type Instance_State int32
const (
// The state of the instance could not be determined.
Instance_STATE_NOT_KNOWN Instance_State = 0
// The instance has been successfully created and can serve requests
// to its tables.
Instance_READY Instance_State = 1
// The instance is currently being created, and may be destroyed
// if the creation process encounters an error.
Instance_CREATING Instance_State = 2
)
var Instance_State_name = map[int32]string{
0: "STATE_NOT_KNOWN",
1: "READY",
2: "CREATING",
}
var Instance_State_value = map[string]int32{
"STATE_NOT_KNOWN": 0,
"READY": 1,
"CREATING": 2,
}
func (x Instance_State) String() string {
return proto.EnumName(Instance_State_name, int32(x))
}
func (Instance_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 0} }
// The type of the instance.
type Instance_Type int32
const (
// The type of the instance is unspecified. If set when creating an
// instance, a `PRODUCTION` instance will be created. If set when updating
// an instance, the type will be left unchanged.
Instance_TYPE_UNSPECIFIED Instance_Type = 0
// An instance meant for production use. `serve_nodes` must be set
// on the cluster.
Instance_PRODUCTION Instance_Type = 1
)
var Instance_Type_name = map[int32]string{
0: "TYPE_UNSPECIFIED",
1: "PRODUCTION",
}
var Instance_Type_value = map[string]int32{
"TYPE_UNSPECIFIED": 0,
"PRODUCTION": 1,
}
func (x Instance_Type) String() string {
return proto.EnumName(Instance_Type_name, int32(x))
}
func (Instance_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 1} }
// Possible states of a cluster.
type Cluster_State int32
const (
// The state of the cluster could not be determined.
Cluster_STATE_NOT_KNOWN Cluster_State = 0
// The cluster has been successfully created and is ready to serve requests.
Cluster_READY Cluster_State = 1
// The cluster is currently being created, and may be destroyed
// if the creation process encounters an error.
// A cluster may not be able to serve requests while being created.
Cluster_CREATING Cluster_State = 2
// The cluster is currently being resized, and may revert to its previous
// node count if the process encounters an error.
// A cluster is still capable of serving requests while being resized,
// but may exhibit performance as if its number of allocated nodes is
// between the starting and requested states.
Cluster_RESIZING Cluster_State = 3
// The cluster has no backing nodes. The data (tables) still
// exist, but no operations can be performed on the cluster.
Cluster_DISABLED Cluster_State = 4
)
var Cluster_State_name = map[int32]string{
0: "STATE_NOT_KNOWN",
1: "READY",
2: "CREATING",
3: "RESIZING",
4: "DISABLED",
}
var Cluster_State_value = map[string]int32{
"STATE_NOT_KNOWN": 0,
"READY": 1,
"CREATING": 2,
"RESIZING": 3,
"DISABLED": 4,
}
func (x Cluster_State) String() string {
return proto.EnumName(Cluster_State_name, int32(x))
}
func (Cluster_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor3, []int{1, 0} }
// A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and
// the resources that serve them.
// All tables in an instance are served from a single
// [Cluster][google.bigtable.admin.v2.Cluster].
type Instance struct {
// (`OutputOnly`)
// The unique name of the instance. Values are of the form
// `projects/<project>/instances/[a-z][a-z0-9\\-]+[a-z0-9]`.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// The descriptive name for this instance as it appears in UIs.
// Can be changed at any time, but should be kept globally unique
// to avoid confusion.
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"`
// (`OutputOnly`)
// The current state of the instance.
State Instance_State `protobuf:"varint,3,opt,name=state,enum=google.bigtable.admin.v2.Instance_State" json:"state,omitempty"`
// The type of the instance. Defaults to `PRODUCTION`.
Type Instance_Type `protobuf:"varint,4,opt,name=type,enum=google.bigtable.admin.v2.Instance_Type" json:"type,omitempty"`
}
func (m *Instance) Reset() { *m = Instance{} }
func (m *Instance) String() string { return proto.CompactTextString(m) }
func (*Instance) ProtoMessage() {}
func (*Instance) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} }
func (m *Instance) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Instance) GetDisplayName() string {
if m != nil {
return m.DisplayName
}
return ""
}
func (m *Instance) GetState() Instance_State {
if m != nil {
return m.State
}
return Instance_STATE_NOT_KNOWN
}
func (m *Instance) GetType() Instance_Type {
if m != nil {
return m.Type
}
return Instance_TYPE_UNSPECIFIED
}
// A resizable group of nodes in a particular cloud location, capable
// of serving all [Tables][google.bigtable.admin.v2.Table] in the parent
// [Instance][google.bigtable.admin.v2.Instance].
type Cluster struct {
// (`OutputOnly`)
// The unique name of the cluster. Values are of the form
// `projects/<project>/instances/<instance>/clusters/[a-z][-a-z0-9]*`.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// (`CreationOnly`)
// The location where this cluster's nodes and storage reside. For best
// performance, clients should be located as close as possible to this cluster.
// Currently only zones are supported, so values should be of the form
// `projects/<project>/locations/<zone>`.
Location string `protobuf:"bytes,2,opt,name=location" json:"location,omitempty"`
// (`OutputOnly`)
// The current state of the cluster.
State Cluster_State `protobuf:"varint,3,opt,name=state,enum=google.bigtable.admin.v2.Cluster_State" json:"state,omitempty"`
// The number of nodes allocated to this cluster. More nodes enable higher
// throughput and more consistent performance.
ServeNodes int32 `protobuf:"varint,4,opt,name=serve_nodes,json=serveNodes" json:"serve_nodes,omitempty"`
// (`CreationOnly`)
// The type of storage used by this cluster to serve its
// parent instance's tables, unless explicitly overridden.
DefaultStorageType StorageType `protobuf:"varint,5,opt,name=default_storage_type,json=defaultStorageType,enum=google.bigtable.admin.v2.StorageType" json:"default_storage_type,omitempty"`
}
func (m *Cluster) Reset() { *m = Cluster{} }
func (m *Cluster) String() string { return proto.CompactTextString(m) }
func (*Cluster) ProtoMessage() {}
func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} }
func (m *Cluster) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Cluster) GetLocation() string {
if m != nil {
return m.Location
}
return ""
}
func (m *Cluster) GetState() Cluster_State {
if m != nil {
return m.State
}
return Cluster_STATE_NOT_KNOWN
}
func (m *Cluster) GetServeNodes() int32 {
if m != nil {
return m.ServeNodes
}
return 0
}
func (m *Cluster) GetDefaultStorageType() StorageType {
if m != nil {
return m.DefaultStorageType
}
return StorageType_STORAGE_TYPE_UNSPECIFIED
}
func init() {
proto.RegisterType((*Instance)(nil), "google.bigtable.admin.v2.Instance")
proto.RegisterType((*Cluster)(nil), "google.bigtable.admin.v2.Cluster")
proto.RegisterEnum("google.bigtable.admin.v2.Instance_State", Instance_State_name, Instance_State_value)
proto.RegisterEnum("google.bigtable.admin.v2.Instance_Type", Instance_Type_name, Instance_Type_value)
proto.RegisterEnum("google.bigtable.admin.v2.Cluster_State", Cluster_State_name, Cluster_State_value)
}
func init() { proto.RegisterFile("google/bigtable/admin/v2/instance.proto", fileDescriptor3) }
var fileDescriptor3 = []byte{
// 463 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xc1, 0x6a, 0xdb, 0x40,
0x10, 0x86, 0x23, 0x47, 0x6a, 0x9d, 0x49, 0x9a, 0x8a, 0x6d, 0x0e, 0xc6, 0x04, 0x9a, 0x0a, 0x42,
0x7c, 0x28, 0x12, 0xb8, 0xf4, 0x14, 0x52, 0xb0, 0x2d, 0xb5, 0x88, 0x16, 0x59, 0x95, 0x14, 0x42,
0x72, 0x11, 0x6b, 0x7b, 0x2b, 0x04, 0xd2, 0xae, 0xd0, 0x6e, 0x0c, 0x7e, 0x9e, 0x3e, 0x4b, 0xdf,
0xab, 0x68, 0x24, 0x97, 0x9a, 0xd6, 0xa5, 0xe4, 0xb6, 0x33, 0xfb, 0xff, 0xf3, 0xaf, 0x3e, 0x0d,
0x5c, 0x65, 0x42, 0x64, 0x05, 0x73, 0x16, 0x79, 0xa6, 0xe8, 0xa2, 0x60, 0x0e, 0x5d, 0x95, 0x39,
0x77, 0xd6, 0x63, 0x27, 0xe7, 0x52, 0x51, 0xbe, 0x64, 0x76, 0x55, 0x0b, 0x25, 0xc8, 0xa0, 0x15,
0xda, 0x5b, 0xa1, 0x8d, 0x42, 0x7b, 0x3d, 0x1e, 0x9e, 0x77, 0x23, 0x68, 0x95, 0x3b, 0x94, 0x73,
0xa1, 0xa8, 0xca, 0x05, 0x97, 0xad, 0x6f, 0x78, 0xb9, 0x37, 0x60, 0x29, 0xca, 0x52, 0xf0, 0x56,
0x66, 0x7d, 0xef, 0x41, 0xdf, 0xef, 0x12, 0x09, 0x01, 0x9d, 0xd3, 0x92, 0x0d, 0xb4, 0x0b, 0x6d,
0x74, 0x14, 0xe1, 0x99, 0xbc, 0x81, 0x93, 0x55, 0x2e, 0xab, 0x82, 0x6e, 0x52, 0xbc, 0xeb, 0xe1,
0xdd, 0x71, 0xd7, 0x0b, 0x1a, 0xc9, 0x07, 0x30, 0xa4, 0xa2, 0x8a, 0x0d, 0x0e, 0x2f, 0xb4, 0xd1,
0xe9, 0x78, 0x64, 0xef, 0x7b, 0xb2, 0xbd, 0x4d, 0xb2, 0xe3, 0x46, 0x1f, 0xb5, 0x36, 0x72, 0x0d,
0xba, 0xda, 0x54, 0x6c, 0xa0, 0xa3, 0xfd, 0xea, 0x3f, 0xec, 0xc9, 0xa6, 0x62, 0x11, 0x9a, 0xac,
0xf7, 0x60, 0xe0, 0x30, 0xf2, 0x0a, 0x5e, 0xc6, 0xc9, 0x24, 0xf1, 0xd2, 0x60, 0x9e, 0xa4, 0x9f,
0x83, 0xf9, 0x5d, 0x60, 0x1e, 0x90, 0x23, 0x30, 0x22, 0x6f, 0xe2, 0xde, 0x9b, 0x1a, 0x39, 0x81,
0xfe, 0x2c, 0xf2, 0x26, 0x89, 0x1f, 0x7c, 0x32, 0x7b, 0xd6, 0x5b, 0xd0, 0x9b, 0x21, 0xe4, 0x0c,
0xcc, 0xe4, 0x3e, 0xf4, 0xd2, 0xdb, 0x20, 0x0e, 0xbd, 0x99, 0xff, 0xd1, 0xf7, 0x5c, 0xf3, 0x80,
0x9c, 0x02, 0x84, 0xd1, 0xdc, 0xbd, 0x9d, 0x25, 0xfe, 0x3c, 0x30, 0x35, 0xeb, 0x47, 0x0f, 0x9e,
0xcf, 0x8a, 0x47, 0xa9, 0x58, 0xfd, 0x57, 0x48, 0x43, 0xe8, 0x17, 0x62, 0x89, 0xfc, 0x3b, 0x40,
0xbf, 0x6a, 0x72, 0xb3, 0x4b, 0xe7, 0x1f, 0x9f, 0xd7, 0x25, 0xec, 0xc2, 0x79, 0x0d, 0xc7, 0x92,
0xd5, 0x6b, 0x96, 0x72, 0xb1, 0x62, 0x12, 0x19, 0x19, 0x11, 0x60, 0x2b, 0x68, 0x3a, 0xe4, 0x0e,
0xce, 0x56, 0xec, 0x1b, 0x7d, 0x2c, 0x54, 0x2a, 0x95, 0xa8, 0x69, 0xc6, 0x52, 0xa4, 0x69, 0x60,
0xdc, 0xe5, 0xfe, 0xb8, 0xb8, 0x55, 0x23, 0x4b, 0xd2, 0x8d, 0xf8, 0xad, 0x67, 0x7d, 0x7d, 0x12,
0xd9, 0xa6, 0x8a, 0xbc, 0xd8, 0x7f, 0x68, 0xaa, 0xc3, 0xa6, 0x72, 0xfd, 0x78, 0x32, 0xfd, 0xe2,
0xb9, 0xa6, 0x3e, 0xe5, 0x70, 0xbe, 0x14, 0xe5, 0xde, 0x27, 0x4d, 0x5f, 0x6c, 0xff, 0x70, 0xd8,
0x2c, 0x67, 0xa8, 0x3d, 0xdc, 0x74, 0xd2, 0x4c, 0x14, 0x94, 0x67, 0xb6, 0xa8, 0x33, 0x27, 0x63,
0x1c, 0x57, 0xd7, 0x69, 0xaf, 0x68, 0x95, 0xcb, 0x3f, 0x97, 0xfc, 0x1a, 0x0f, 0x8b, 0x67, 0xa8,
0x7c, 0xf7, 0x33, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x10, 0x73, 0x5b, 0x6e, 0x03, 0x00, 0x00,
}
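
The sketch below is illustrative only and is not part of the vendored file: it populates a Cluster message using the fields defined above. The project, instance, cluster, and zone names and the node count are placeholder assumptions.

package main

import (
    "fmt"

    btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
)

func main() {
    cluster := &btapb.Cluster{
        Name:               "projects/my-project/instances/my-instance/clusters/my-cluster",
        Location:           "projects/my-project/locations/us-central1-b",
        ServeNodes:         3,
        DefaultStorageType: btapb.StorageType_SSD,
    }
    // The generated getters are nil-safe accessors for the same fields.
    fmt.Println(cluster.GetServeNodes(), cluster.GetDefaultStorageType()) // 3 SSD
}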


@ -0,0 +1,416 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/bigtable/admin/v2/table.proto
package admin
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import google_protobuf4 "github.com/golang/protobuf/ptypes/duration"
import _ "github.com/golang/protobuf/ptypes/timestamp"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Possible timestamp granularities to use when keeping multiple versions
// of data in a table.
type Table_TimestampGranularity int32
const (
// The user did not specify a granularity. Should not be returned.
// When specified during table creation, MILLIS will be used.
Table_TIMESTAMP_GRANULARITY_UNSPECIFIED Table_TimestampGranularity = 0
// The table keeps data versioned at a granularity of 1ms.
Table_MILLIS Table_TimestampGranularity = 1
)
var Table_TimestampGranularity_name = map[int32]string{
0: "TIMESTAMP_GRANULARITY_UNSPECIFIED",
1: "MILLIS",
}
var Table_TimestampGranularity_value = map[string]int32{
"TIMESTAMP_GRANULARITY_UNSPECIFIED": 0,
"MILLIS": 1,
}
func (x Table_TimestampGranularity) String() string {
return proto.EnumName(Table_TimestampGranularity_name, int32(x))
}
func (Table_TimestampGranularity) EnumDescriptor() ([]byte, []int) {
return fileDescriptor4, []int{0, 0}
}
// Defines a view over a table's fields.
type Table_View int32
const (
// Uses the default view for each method as documented in its request.
Table_VIEW_UNSPECIFIED Table_View = 0
// Only populates `name`.
Table_NAME_ONLY Table_View = 1
// Only populates `name` and fields related to the table's schema.
Table_SCHEMA_VIEW Table_View = 2
// Populates all fields.
Table_FULL Table_View = 4
)
var Table_View_name = map[int32]string{
0: "VIEW_UNSPECIFIED",
1: "NAME_ONLY",
2: "SCHEMA_VIEW",
4: "FULL",
}
var Table_View_value = map[string]int32{
"VIEW_UNSPECIFIED": 0,
"NAME_ONLY": 1,
"SCHEMA_VIEW": 2,
"FULL": 4,
}
func (x Table_View) String() string {
return proto.EnumName(Table_View_name, int32(x))
}
func (Table_View) EnumDescriptor() ([]byte, []int) { return fileDescriptor4, []int{0, 1} }
// A collection of user data indexed by row, column, and timestamp.
// Each table is served using the resources of its parent cluster.
type Table struct {
// (`OutputOnly`)
// The unique name of the table. Values are of the form
// `projects/<project>/instances/<instance>/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`.
// Views: `NAME_ONLY`, `SCHEMA_VIEW`, `FULL`
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// (`CreationOnly`)
// The column families configured for this table, mapped by column family ID.
// Views: `SCHEMA_VIEW`, `FULL`
ColumnFamilies map[string]*ColumnFamily `protobuf:"bytes,3,rep,name=column_families,json=columnFamilies" json:"column_families,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
// (`CreationOnly`)
// The granularity (e.g. `MILLIS`, `MICROS`) at which timestamps are stored in
// this table. Timestamps not matching the granularity will be rejected.
// If unspecified at creation time, the value will be set to `MILLIS`.
// Views: `SCHEMA_VIEW`, `FULL`
Granularity Table_TimestampGranularity `protobuf:"varint,4,opt,name=granularity,enum=google.bigtable.admin.v2.Table_TimestampGranularity" json:"granularity,omitempty"`
}
func (m *Table) Reset() { *m = Table{} }
func (m *Table) String() string { return proto.CompactTextString(m) }
func (*Table) ProtoMessage() {}
func (*Table) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} }
func (m *Table) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Table) GetColumnFamilies() map[string]*ColumnFamily {
if m != nil {
return m.ColumnFamilies
}
return nil
}
func (m *Table) GetGranularity() Table_TimestampGranularity {
if m != nil {
return m.Granularity
}
return Table_TIMESTAMP_GRANULARITY_UNSPECIFIED
}
// A set of columns within a table which share a common configuration.
type ColumnFamily struct {
// Garbage collection rule specified as a protobuf.
// Must serialize to at most 500 bytes.
//
// NOTE: Garbage collection executes opportunistically in the background, and
// so it's possible for reads to return a cell even if it matches the active
// GC expression for its family.
GcRule *GcRule `protobuf:"bytes,1,opt,name=gc_rule,json=gcRule" json:"gc_rule,omitempty"`
}
func (m *ColumnFamily) Reset() { *m = ColumnFamily{} }
func (m *ColumnFamily) String() string { return proto.CompactTextString(m) }
func (*ColumnFamily) ProtoMessage() {}
func (*ColumnFamily) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} }
func (m *ColumnFamily) GetGcRule() *GcRule {
if m != nil {
return m.GcRule
}
return nil
}
// Rule for determining which cells to delete during garbage collection.
type GcRule struct {
// Garbage collection rules.
//
// Types that are valid to be assigned to Rule:
// *GcRule_MaxNumVersions
// *GcRule_MaxAge
// *GcRule_Intersection_
// *GcRule_Union_
Rule isGcRule_Rule `protobuf_oneof:"rule"`
}
func (m *GcRule) Reset() { *m = GcRule{} }
func (m *GcRule) String() string { return proto.CompactTextString(m) }
func (*GcRule) ProtoMessage() {}
func (*GcRule) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} }
type isGcRule_Rule interface {
isGcRule_Rule()
}
type GcRule_MaxNumVersions struct {
MaxNumVersions int32 `protobuf:"varint,1,opt,name=max_num_versions,json=maxNumVersions,oneof"`
}
type GcRule_MaxAge struct {
MaxAge *google_protobuf4.Duration `protobuf:"bytes,2,opt,name=max_age,json=maxAge,oneof"`
}
type GcRule_Intersection_ struct {
Intersection *GcRule_Intersection `protobuf:"bytes,3,opt,name=intersection,oneof"`
}
type GcRule_Union_ struct {
Union *GcRule_Union `protobuf:"bytes,4,opt,name=union,oneof"`
}
func (*GcRule_MaxNumVersions) isGcRule_Rule() {}
func (*GcRule_MaxAge) isGcRule_Rule() {}
func (*GcRule_Intersection_) isGcRule_Rule() {}
func (*GcRule_Union_) isGcRule_Rule() {}
func (m *GcRule) GetRule() isGcRule_Rule {
if m != nil {
return m.Rule
}
return nil
}
func (m *GcRule) GetMaxNumVersions() int32 {
if x, ok := m.GetRule().(*GcRule_MaxNumVersions); ok {
return x.MaxNumVersions
}
return 0
}
func (m *GcRule) GetMaxAge() *google_protobuf4.Duration {
if x, ok := m.GetRule().(*GcRule_MaxAge); ok {
return x.MaxAge
}
return nil
}
func (m *GcRule) GetIntersection() *GcRule_Intersection {
if x, ok := m.GetRule().(*GcRule_Intersection_); ok {
return x.Intersection
}
return nil
}
func (m *GcRule) GetUnion() *GcRule_Union {
if x, ok := m.GetRule().(*GcRule_Union_); ok {
return x.Union
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*GcRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _GcRule_OneofMarshaler, _GcRule_OneofUnmarshaler, _GcRule_OneofSizer, []interface{}{
(*GcRule_MaxNumVersions)(nil),
(*GcRule_MaxAge)(nil),
(*GcRule_Intersection_)(nil),
(*GcRule_Union_)(nil),
}
}
func _GcRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*GcRule)
// rule
switch x := m.Rule.(type) {
case *GcRule_MaxNumVersions:
b.EncodeVarint(1<<3 | proto.WireVarint)
b.EncodeVarint(uint64(x.MaxNumVersions))
case *GcRule_MaxAge:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.MaxAge); err != nil {
return err
}
case *GcRule_Intersection_:
b.EncodeVarint(3<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Intersection); err != nil {
return err
}
case *GcRule_Union_:
b.EncodeVarint(4<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Union); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("GcRule.Rule has unexpected type %T", x)
}
return nil
}
func _GcRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*GcRule)
switch tag {
case 1: // rule.max_num_versions
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Rule = &GcRule_MaxNumVersions{int32(x)}
return true, err
case 2: // rule.max_age
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(google_protobuf4.Duration)
err := b.DecodeMessage(msg)
m.Rule = &GcRule_MaxAge{msg}
return true, err
case 3: // rule.intersection
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(GcRule_Intersection)
err := b.DecodeMessage(msg)
m.Rule = &GcRule_Intersection_{msg}
return true, err
case 4: // rule.union
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(GcRule_Union)
err := b.DecodeMessage(msg)
m.Rule = &GcRule_Union_{msg}
return true, err
default:
return false, nil
}
}
func _GcRule_OneofSizer(msg proto.Message) (n int) {
m := msg.(*GcRule)
// rule
switch x := m.Rule.(type) {
case *GcRule_MaxNumVersions:
n += proto.SizeVarint(1<<3 | proto.WireVarint)
n += proto.SizeVarint(uint64(x.MaxNumVersions))
case *GcRule_MaxAge:
s := proto.Size(x.MaxAge)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *GcRule_Intersection_:
s := proto.Size(x.Intersection)
n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *GcRule_Union_:
s := proto.Size(x.Union)
n += proto.SizeVarint(4<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
// A GcRule which deletes cells matching all of the given rules.
type GcRule_Intersection struct {
// Only delete cells which would be deleted by every element of `rules`.
Rules []*GcRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"`
}
func (m *GcRule_Intersection) Reset() { *m = GcRule_Intersection{} }
func (m *GcRule_Intersection) String() string { return proto.CompactTextString(m) }
func (*GcRule_Intersection) ProtoMessage() {}
func (*GcRule_Intersection) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2, 0} }
func (m *GcRule_Intersection) GetRules() []*GcRule {
if m != nil {
return m.Rules
}
return nil
}
// A GcRule which deletes cells matching any of the given rules.
type GcRule_Union struct {
// Delete cells which would be deleted by any element of `rules`.
Rules []*GcRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"`
}
func (m *GcRule_Union) Reset() { *m = GcRule_Union{} }
func (m *GcRule_Union) String() string { return proto.CompactTextString(m) }
func (*GcRule_Union) ProtoMessage() {}
func (*GcRule_Union) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2, 1} }
func (m *GcRule_Union) GetRules() []*GcRule {
if m != nil {
return m.Rules
}
return nil
}
func init() {
proto.RegisterType((*Table)(nil), "google.bigtable.admin.v2.Table")
proto.RegisterType((*ColumnFamily)(nil), "google.bigtable.admin.v2.ColumnFamily")
proto.RegisterType((*GcRule)(nil), "google.bigtable.admin.v2.GcRule")
proto.RegisterType((*GcRule_Intersection)(nil), "google.bigtable.admin.v2.GcRule.Intersection")
proto.RegisterType((*GcRule_Union)(nil), "google.bigtable.admin.v2.GcRule.Union")
proto.RegisterEnum("google.bigtable.admin.v2.Table_TimestampGranularity", Table_TimestampGranularity_name, Table_TimestampGranularity_value)
proto.RegisterEnum("google.bigtable.admin.v2.Table_View", Table_View_name, Table_View_value)
}
func init() { proto.RegisterFile("google/bigtable/admin/v2/table.proto", fileDescriptor4) }
var fileDescriptor4 = []byte{
// 598 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x7d, 0x6b, 0xda, 0x5e,
0x14, 0x36, 0x4d, 0xb4, 0xbf, 0x1e, 0xfb, 0x6b, 0xc3, 0x5d, 0xff, 0x70, 0x52, 0x36, 0x27, 0xdb,
0x90, 0xc1, 0x12, 0xb0, 0x65, 0xec, 0x7d, 0xd8, 0x36, 0xd6, 0x80, 0x3a, 0x89, 0x2f, 0xa3, 0x63,
0x10, 0xae, 0xe9, 0xed, 0xe5, 0xd2, 0xdc, 0x1b, 0xc9, 0x8b, 0xab, 0xdf, 0x62, 0xdf, 0x6c, 0x5f,
0x69, 0xe4, 0x26, 0x32, 0xdb, 0x55, 0x1c, 0xfb, 0xcb, 0x73, 0xcf, 0x79, 0x9e, 0xe7, 0xbc, 0x1a,
0x78, 0x4a, 0x83, 0x80, 0xfa, 0xc4, 0x9c, 0x32, 0x1a, 0xe3, 0xa9, 0x4f, 0x4c, 0x7c, 0xc9, 0x99,
0x30, 0xe7, 0x4d, 0x53, 0x3e, 0x8d, 0x59, 0x18, 0xc4, 0x01, 0xaa, 0x64, 0x28, 0x63, 0x89, 0x32,
0x24, 0xca, 0x98, 0x37, 0xab, 0x87, 0x39, 0x1f, 0xcf, 0x98, 0x89, 0x85, 0x08, 0x62, 0x1c, 0xb3,
0x40, 0x44, 0x19, 0xaf, 0xfa, 0x28, 0x8f, 0xca, 0xd7, 0x34, 0xb9, 0x32, 0x2f, 0x93, 0x50, 0x02,
0xf2, 0xf8, 0xe3, 0xbb, 0xf1, 0x98, 0x71, 0x12, 0xc5, 0x98, 0xcf, 0x32, 0x40, 0xfd, 0xa7, 0x0a,
0xc5, 0x51, 0x9a, 0x11, 0x21, 0xd0, 0x04, 0xe6, 0xa4, 0xa2, 0xd4, 0x94, 0xc6, 0x8e, 0x23, 0x6d,
0xf4, 0x0d, 0xf6, 0xbd, 0xc0, 0x4f, 0xb8, 0x70, 0xaf, 0x30, 0x67, 0x3e, 0x23, 0x51, 0x45, 0xad,
0xa9, 0x8d, 0x72, 0xf3, 0xc8, 0x58, 0x57, 0xb0, 0x21, 0xd5, 0x8c, 0x53, 0x49, 0x6b, 0xe7, 0x2c,
0x4b, 0xc4, 0xe1, 0xc2, 0xd9, 0xf3, 0x6e, 0x39, 0xd1, 0x04, 0xca, 0x34, 0xc4, 0x22, 0xf1, 0x71,
0xc8, 0xe2, 0x45, 0x45, 0xab, 0x29, 0x8d, 0xbd, 0xe6, 0xf1, 0x26, 0xe5, 0xd1, 0xb2, 0x83, 0xf3,
0xdf, 0x5c, 0x67, 0x55, 0xa8, 0xca, 0xe0, 0xc1, 0x3d, 0xe9, 0x91, 0x0e, 0xea, 0x35, 0x59, 0xe4,
0xfd, 0xa5, 0x26, 0x7a, 0x0f, 0xc5, 0x39, 0xf6, 0x13, 0x52, 0xd9, 0xaa, 0x29, 0x8d, 0x72, 0xf3,
0xf9, 0xfa, 0xd4, 0x2b, 0x7a, 0x0b, 0x27, 0x23, 0xbd, 0xdd, 0x7a, 0xad, 0xd4, 0x6d, 0x38, 0xb8,
0xaf, 0x1e, 0xf4, 0x0c, 0x9e, 0x8c, 0xec, 0x9e, 0x35, 0x1c, 0xb5, 0x7a, 0x03, 0xf7, 0xdc, 0x69,
0xf5, 0xc7, 0xdd, 0x96, 0x63, 0x8f, 0x2e, 0xdc, 0x71, 0x7f, 0x38, 0xb0, 0x4e, 0xed, 0xb6, 0x6d,
0x9d, 0xe9, 0x05, 0x04, 0x50, 0xea, 0xd9, 0xdd, 0xae, 0x3d, 0xd4, 0x95, 0x7a, 0x1b, 0xb4, 0x09,
0x23, 0xdf, 0xd1, 0x01, 0xe8, 0x13, 0xdb, 0xfa, 0x72, 0x07, 0xf9, 0x3f, 0xec, 0xf4, 0x5b, 0x3d,
0xcb, 0xfd, 0xdc, 0xef, 0x5e, 0xe8, 0x0a, 0xda, 0x87, 0xf2, 0xf0, 0xb4, 0x63, 0xf5, 0x5a, 0x6e,
0x8a, 0xd5, 0xb7, 0xd0, 0x7f, 0xa0, 0xb5, 0xc7, 0xdd, 0xae, 0xae, 0xd5, 0x6d, 0xd8, 0x5d, 0xad,
0x16, 0xbd, 0x81, 0x6d, 0xea, 0xb9, 0x61, 0xe2, 0x67, 0xab, 0x2d, 0x37, 0x6b, 0xeb, 0xdb, 0x3c,
0xf7, 0x9c, 0xc4, 0x27, 0x4e, 0x89, 0xca, 0xdf, 0xfa, 0x0f, 0x15, 0x4a, 0x99, 0x0b, 0xbd, 0x00,
0x9d, 0xe3, 0x1b, 0x57, 0x24, 0xdc, 0x9d, 0x93, 0x30, 0x4a, 0x4f, 0x50, 0xca, 0x15, 0x3b, 0x05,
0x67, 0x8f, 0xe3, 0x9b, 0x7e, 0xc2, 0x27, 0xb9, 0x1f, 0x1d, 0xc3, 0x76, 0x8a, 0xc5, 0x74, 0x39,
0xd8, 0x87, 0xcb, 0x8c, 0xcb, 0x33, 0x34, 0xce, 0xf2, 0x33, 0xed, 0x14, 0x9c, 0x12, 0xc7, 0x37,
0x2d, 0x4a, 0xd0, 0x10, 0x76, 0x99, 0x88, 0x49, 0x18, 0x11, 0x2f, 0x8d, 0x54, 0x54, 0x49, 0x7d,
0xb9, 0xa9, 0x58, 0xc3, 0x5e, 0x21, 0x75, 0x0a, 0xce, 0x2d, 0x11, 0xf4, 0x11, 0x8a, 0x89, 0x48,
0xd5, 0xb4, 0x4d, 0x1b, 0xce, 0xd5, 0xc6, 0x22, 0x93, 0xc9, 0x68, 0xd5, 0x36, 0xec, 0xae, 0xea,
0xa3, 0x57, 0x50, 0x4c, 0x27, 0x99, 0xf6, 0xae, 0xfe, 0xd5, 0x28, 0x33, 0x78, 0xf5, 0x13, 0x14,
0xa5, 0xf2, 0xbf, 0x0a, 0x9c, 0x94, 0x40, 0x4b, 0x8d, 0x93, 0x6b, 0x38, 0xf4, 0x02, 0xbe, 0x96,
0x75, 0x02, 0xf2, 0x4f, 0x32, 0x48, 0xe7, 0x3c, 0x50, 0xbe, 0x7e, 0xc8, 0x71, 0x34, 0xf0, 0xb1,
0xa0, 0x46, 0x10, 0x52, 0x93, 0x12, 0x21, 0xb7, 0x60, 0x66, 0x21, 0x3c, 0x63, 0xd1, 0x9f, 0xdf,
0xa6, 0x77, 0xd2, 0x98, 0x96, 0x24, 0xf2, 0xe8, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x68, 0xe3,
0x1b, 0xd9, 0xc4, 0x04, 0x00, 0x00,
}
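
The sketch below is illustrative only and is not part of the vendored file: it composes the GcRule oneof defined above into a union rule, so a cell is eligible for garbage collection if it is beyond the newest three versions or older than seven days (a Union deletes cells matching any of its sub-rules).

package main

import (
    "fmt"
    "time"

    "github.com/golang/protobuf/ptypes"
    btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
)

func main() {
    rule := &btapb.GcRule{
        Rule: &btapb.GcRule_Union_{
            Union: &btapb.GcRule_Union{
                Rules: []*btapb.GcRule{
                    // Keep at most three versions of each cell.
                    {Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 3}},
                    // Drop anything older than seven days.
                    {Rule: &btapb.GcRule_MaxAge{MaxAge: ptypes.DurationProto(7 * 24 * time.Hour)}},
                },
            },
        },
    }
    fmt.Println(rule) // prints the compact text form of the rule
}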

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,596 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/longrunning/operations.proto
/*
Package longrunning is a generated protocol buffer package.
It is generated from these files:
google/longrunning/operations.proto
It has these top-level messages:
Operation
GetOperationRequest
ListOperationsRequest
ListOperationsResponse
CancelOperationRequest
DeleteOperationRequest
*/
package longrunning
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import google_protobuf1 "github.com/golang/protobuf/ptypes/any"
import google_protobuf2 "github.com/golang/protobuf/ptypes/empty"
import google_rpc "google.golang.org/genproto/googleapis/rpc/status"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// This resource represents a long-running operation that is the result of a
// network API call.
type Operation struct {
// The server-assigned name, which is only unique within the same service that
// originally returns it. If you use the default HTTP mapping, the
// `name` should have the format of `operations/some/unique/name`.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// Service-specific metadata associated with the operation. It typically
// contains progress information and common metadata such as create time.
// Some services might not provide such metadata. Any method that returns a
// long-running operation should document the metadata type, if any.
Metadata *google_protobuf1.Any `protobuf:"bytes,2,opt,name=metadata" json:"metadata,omitempty"`
// If the value is `false`, it means the operation is still in progress.
// If true, the operation is completed, and either `error` or `response` is
// available.
Done bool `protobuf:"varint,3,opt,name=done" json:"done,omitempty"`
// The operation result, which can be either an `error` or a valid `response`.
// If `done` == `false`, neither `error` nor `response` is set.
// If `done` == `true`, exactly one of `error` or `response` is set.
//
// Types that are valid to be assigned to Result:
// *Operation_Error
// *Operation_Response
Result isOperation_Result `protobuf_oneof:"result"`
}
func (m *Operation) Reset() { *m = Operation{} }
func (m *Operation) String() string { return proto.CompactTextString(m) }
func (*Operation) ProtoMessage() {}
func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type isOperation_Result interface {
isOperation_Result()
}
type Operation_Error struct {
Error *google_rpc.Status `protobuf:"bytes,4,opt,name=error,oneof"`
}
type Operation_Response struct {
Response *google_protobuf1.Any `protobuf:"bytes,5,opt,name=response,oneof"`
}
func (*Operation_Error) isOperation_Result() {}
func (*Operation_Response) isOperation_Result() {}
func (m *Operation) GetResult() isOperation_Result {
if m != nil {
return m.Result
}
return nil
}
func (m *Operation) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Operation) GetMetadata() *google_protobuf1.Any {
if m != nil {
return m.Metadata
}
return nil
}
func (m *Operation) GetDone() bool {
if m != nil {
return m.Done
}
return false
}
func (m *Operation) GetError() *google_rpc.Status {
if x, ok := m.GetResult().(*Operation_Error); ok {
return x.Error
}
return nil
}
func (m *Operation) GetResponse() *google_protobuf1.Any {
if x, ok := m.GetResult().(*Operation_Response); ok {
return x.Response
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*Operation) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _Operation_OneofMarshaler, _Operation_OneofUnmarshaler, _Operation_OneofSizer, []interface{}{
(*Operation_Error)(nil),
(*Operation_Response)(nil),
}
}
func _Operation_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*Operation)
// result
switch x := m.Result.(type) {
case *Operation_Error:
b.EncodeVarint(4<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Error); err != nil {
return err
}
case *Operation_Response:
b.EncodeVarint(5<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Response); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("Operation.Result has unexpected type %T", x)
}
return nil
}
func _Operation_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*Operation)
switch tag {
case 4: // result.error
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(google_rpc.Status)
err := b.DecodeMessage(msg)
m.Result = &Operation_Error{msg}
return true, err
case 5: // result.response
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(google_protobuf1.Any)
err := b.DecodeMessage(msg)
m.Result = &Operation_Response{msg}
return true, err
default:
return false, nil
}
}
func _Operation_OneofSizer(msg proto.Message) (n int) {
m := msg.(*Operation)
// result
switch x := m.Result.(type) {
case *Operation_Error:
s := proto.Size(x.Error)
n += proto.SizeVarint(4<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *Operation_Response:
s := proto.Size(x.Response)
n += proto.SizeVarint(5<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation].
type GetOperationRequest struct {
// The name of the operation resource.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}
func (m *GetOperationRequest) Reset() { *m = GetOperationRequest{} }
func (m *GetOperationRequest) String() string { return proto.CompactTextString(m) }
func (*GetOperationRequest) ProtoMessage() {}
func (*GetOperationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *GetOperationRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
type ListOperationsRequest struct {
// The name of the operation collection.
Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
// The standard list filter.
Filter string `protobuf:"bytes,1,opt,name=filter" json:"filter,omitempty"`
// The standard list page size.
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"`
// The standard list page token.
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"`
}
func (m *ListOperationsRequest) Reset() { *m = ListOperationsRequest{} }
func (m *ListOperationsRequest) String() string { return proto.CompactTextString(m) }
func (*ListOperationsRequest) ProtoMessage() {}
func (*ListOperationsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *ListOperationsRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *ListOperationsRequest) GetFilter() string {
if m != nil {
return m.Filter
}
return ""
}
func (m *ListOperationsRequest) GetPageSize() int32 {
if m != nil {
return m.PageSize
}
return 0
}
func (m *ListOperationsRequest) GetPageToken() string {
if m != nil {
return m.PageToken
}
return ""
}
// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
type ListOperationsResponse struct {
// A list of operations that matches the specified filter in the request.
Operations []*Operation `protobuf:"bytes,1,rep,name=operations" json:"operations,omitempty"`
// The standard List next-page token.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"`
}
func (m *ListOperationsResponse) Reset() { *m = ListOperationsResponse{} }
func (m *ListOperationsResponse) String() string { return proto.CompactTextString(m) }
func (*ListOperationsResponse) ProtoMessage() {}
func (*ListOperationsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *ListOperationsResponse) GetOperations() []*Operation {
if m != nil {
return m.Operations
}
return nil
}
func (m *ListOperationsResponse) GetNextPageToken() string {
if m != nil {
return m.NextPageToken
}
return ""
}
// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation].
type CancelOperationRequest struct {
// The name of the operation resource to be cancelled.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}
func (m *CancelOperationRequest) Reset() { *m = CancelOperationRequest{} }
func (m *CancelOperationRequest) String() string { return proto.CompactTextString(m) }
func (*CancelOperationRequest) ProtoMessage() {}
func (*CancelOperationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *CancelOperationRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation].
type DeleteOperationRequest struct {
// The name of the operation resource to be deleted.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}
func (m *DeleteOperationRequest) Reset() { *m = DeleteOperationRequest{} }
func (m *DeleteOperationRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteOperationRequest) ProtoMessage() {}
func (*DeleteOperationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *DeleteOperationRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func init() {
proto.RegisterType((*Operation)(nil), "google.longrunning.Operation")
proto.RegisterType((*GetOperationRequest)(nil), "google.longrunning.GetOperationRequest")
proto.RegisterType((*ListOperationsRequest)(nil), "google.longrunning.ListOperationsRequest")
proto.RegisterType((*ListOperationsResponse)(nil), "google.longrunning.ListOperationsResponse")
proto.RegisterType((*CancelOperationRequest)(nil), "google.longrunning.CancelOperationRequest")
proto.RegisterType((*DeleteOperationRequest)(nil), "google.longrunning.DeleteOperationRequest")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Operations service
type OperationsClient interface {
// Lists operations that match the specified filter in the request. If the
// server doesn't support this method, it returns `UNIMPLEMENTED`.
//
// NOTE: the `name` binding below allows API services to override the binding
// to use different resource name schemes, such as `users/*/operations`.
ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error)
// Gets the latest state of a long-running operation. Clients can use this
// method to poll the operation result at intervals as recommended by the API
// service.
GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error)
// Deletes a long-running operation. This method indicates that the client is
// no longer interested in the operation result. It does not cancel the
// operation. If the server doesn't support this method, it returns
// `google.rpc.Code.UNIMPLEMENTED`.
DeleteOperation(ctx context.Context, in *DeleteOperationRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
// Starts asynchronous cancellation on a long-running operation. The server
// makes a best effort to cancel the operation, but success is not
// guaranteed. If the server doesn't support this method, it returns
// `google.rpc.Code.UNIMPLEMENTED`. Clients can use
// [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
// other methods to check whether the cancellation succeeded or whether the
// operation completed despite cancellation. On successful cancellation,
// the operation is not deleted; instead, it becomes an operation with
// an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
// corresponding to `Code.CANCELLED`.
CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
}
type operationsClient struct {
cc *grpc.ClientConn
}
func NewOperationsClient(cc *grpc.ClientConn) OperationsClient {
return &operationsClient{cc}
}
func (c *operationsClient) ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error) {
out := new(ListOperationsResponse)
err := grpc.Invoke(ctx, "/google.longrunning.Operations/ListOperations", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *operationsClient) GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error) {
out := new(Operation)
err := grpc.Invoke(ctx, "/google.longrunning.Operations/GetOperation", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *operationsClient) DeleteOperation(ctx context.Context, in *DeleteOperationRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
out := new(google_protobuf2.Empty)
err := grpc.Invoke(ctx, "/google.longrunning.Operations/DeleteOperation", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *operationsClient) CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
out := new(google_protobuf2.Empty)
err := grpc.Invoke(ctx, "/google.longrunning.Operations/CancelOperation", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Operations service
type OperationsServer interface {
// Lists operations that match the specified filter in the request. If the
// server doesn't support this method, it returns `UNIMPLEMENTED`.
//
// NOTE: the `name` binding below allows API services to override the binding
// to use different resource name schemes, such as `users/*/operations`.
ListOperations(context.Context, *ListOperationsRequest) (*ListOperationsResponse, error)
// Gets the latest state of a long-running operation. Clients can use this
// method to poll the operation result at intervals as recommended by the API
// service.
GetOperation(context.Context, *GetOperationRequest) (*Operation, error)
// Deletes a long-running operation. This method indicates that the client is
// no longer interested in the operation result. It does not cancel the
// operation. If the server doesn't support this method, it returns
// `google.rpc.Code.UNIMPLEMENTED`.
DeleteOperation(context.Context, *DeleteOperationRequest) (*google_protobuf2.Empty, error)
// Starts asynchronous cancellation on a long-running operation. The server
// makes a best effort to cancel the operation, but success is not
// guaranteed. If the server doesn't support this method, it returns
// `google.rpc.Code.UNIMPLEMENTED`. Clients can use
// [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
// other methods to check whether the cancellation succeeded or whether the
// operation completed despite cancellation. On successful cancellation,
// the operation is not deleted; instead, it becomes an operation with
// an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
// corresponding to `Code.CANCELLED`.
CancelOperation(context.Context, *CancelOperationRequest) (*google_protobuf2.Empty, error)
}
func RegisterOperationsServer(s *grpc.Server, srv OperationsServer) {
s.RegisterService(&_Operations_serviceDesc, srv)
}
func _Operations_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListOperationsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(OperationsServer).ListOperations(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.longrunning.Operations/ListOperations",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(OperationsServer).ListOperations(ctx, req.(*ListOperationsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Operations_GetOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetOperationRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(OperationsServer).GetOperation(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.longrunning.Operations/GetOperation",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(OperationsServer).GetOperation(ctx, req.(*GetOperationRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Operations_DeleteOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteOperationRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(OperationsServer).DeleteOperation(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.longrunning.Operations/DeleteOperation",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(OperationsServer).DeleteOperation(ctx, req.(*DeleteOperationRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Operations_CancelOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CancelOperationRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(OperationsServer).CancelOperation(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.longrunning.Operations/CancelOperation",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(OperationsServer).CancelOperation(ctx, req.(*CancelOperationRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Operations_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.longrunning.Operations",
HandlerType: (*OperationsServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ListOperations",
Handler: _Operations_ListOperations_Handler,
},
{
MethodName: "GetOperation",
Handler: _Operations_GetOperation_Handler,
},
{
MethodName: "DeleteOperation",
Handler: _Operations_DeleteOperation_Handler,
},
{
MethodName: "CancelOperation",
Handler: _Operations_CancelOperation_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/longrunning/operations.proto",
}
func init() { proto.RegisterFile("google/longrunning/operations.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 586 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xc1, 0x6e, 0xd3, 0x40,
0x10, 0xad, 0xd3, 0x24, 0x4a, 0xa6, 0x40, 0xa4, 0x85, 0xba, 0xc6, 0x25, 0x22, 0x32, 0x08, 0x52,
0xab, 0xb2, 0x21, 0xdc, 0x8a, 0x72, 0x20, 0x80, 0xda, 0x43, 0x25, 0x22, 0x97, 0x13, 0x97, 0x6a,
0x9b, 0x4e, 0x2d, 0x0b, 0x67, 0xd7, 0xac, 0x37, 0xd0, 0x16, 0x55, 0x11, 0x1c, 0x38, 0x71, 0xe3,
0x2f, 0xf8, 0x19, 0x0e, 0xfc, 0x02, 0x1f, 0x82, 0xbc, 0x76, 0x62, 0x93, 0x3a, 0x28, 0xb7, 0xf5,
0xcc, 0x9b, 0x79, 0xf3, 0xde, 0xce, 0x1a, 0x1e, 0xf8, 0x9c, 0xfb, 0x21, 0xba, 0x21, 0x67, 0xbe,
0x98, 0x30, 0x16, 0x30, 0xdf, 0xe5, 0x11, 0x0a, 0x2a, 0x03, 0xce, 0x62, 0x27, 0x12, 0x5c, 0x72,
0x42, 0x52, 0x90, 0x53, 0x00, 0x99, 0xf7, 0xb2, 0x42, 0x1a, 0x05, 0x2e, 0x65, 0x8c, 0xcb, 0x62,
0x85, 0x79, 0x37, 0xcb, 0xaa, 0xaf, 0x93, 0xc9, 0x99, 0x4b, 0xd9, 0x45, 0x96, 0xda, 0x5e, 0x4c,
0xe1, 0x38, 0x92, 0xb3, 0xe4, 0x56, 0x96, 0x14, 0xd1, 0xc8, 0x8d, 0x25, 0x95, 0x93, 0xac, 0xa1,
0xf5, 0x4b, 0x83, 0xe6, 0x9b, 0xd9, 0x5c, 0x84, 0x40, 0x95, 0xd1, 0x31, 0x1a, 0x5a, 0x47, 0xeb,
0x36, 0x3d, 0x75, 0x26, 0x4f, 0xa0, 0x31, 0x46, 0x49, 0x4f, 0xa9, 0xa4, 0x46, 0xa5, 0xa3, 0x75,
0x37, 0x7a, 0x77, 0x9c, 0x6c, 0xee, 0x19, 0x95, 0xf3, 0x82, 0x5d, 0x78, 0x73, 0x54, 0xd2, 0xe5,
0x94, 0x33, 0x34, 0xd6, 0x3b, 0x5a, 0xb7, 0xe1, 0xa9, 0x33, 0xb1, 0xa1, 0x86, 0x42, 0x70, 0x61,
0x54, 0x55, 0x0b, 0x32, 0x6b, 0x21, 0xa2, 0x91, 0x73, 0xa4, 0x06, 0x3a, 0x58, 0xf3, 0x52, 0x08,
0xe9, 0x41, 0x43, 0x60, 0x1c, 0x71, 0x16, 0xa3, 0x51, 0x5b, 0xce, 0x78, 0xb0, 0xe6, 0xcd, 0x71,
0x83, 0x06, 0xd4, 0x05, 0xc6, 0x93, 0x50, 0x5a, 0x3b, 0x70, 0x7b, 0x1f, 0xe5, 0x5c, 0x93, 0x87,
0x1f, 0x26, 0x18, 0xcb, 0x32, 0x69, 0xd6, 0x14, 0x36, 0x0f, 0x83, 0x38, 0xc7, 0xc6, 0x8b, 0xe0,
0x6a, 0xc1, 0x07, 0x1d, 0xea, 0x67, 0x41, 0x28, 0x51, 0x64, 0x2d, 0xb2, 0x2f, 0xb2, 0x0d, 0xcd,
0x88, 0xfa, 0x78, 0x1c, 0x07, 0x97, 0xa8, 0x0c, 0xaa, 0x79, 0x8d, 0x24, 0x70, 0x14, 0x5c, 0x22,
0x69, 0x03, 0xa8, 0xa4, 0xe4, 0xef, 0x91, 0x29, 0x43, 0x9a, 0x9e, 0x82, 0xbf, 0x4d, 0x02, 0xd6,
0x14, 0xf4, 0xc5, 0x01, 0x52, 0x3d, 0xa4, 0x0f, 0x90, 0xaf, 0x8b, 0xa1, 0x75, 0xd6, 0xbb, 0x1b,
0xbd, 0xb6, 0x73, 0x7d, 0x5f, 0x9c, 0x5c, 0x68, 0xa1, 0x80, 0x3c, 0x82, 0x16, 0xc3, 0x73, 0x79,
0x5c, 0x20, 0xaf, 0x28, 0xf2, 0x9b, 0x49, 0x78, 0x38, 0x1f, 0x60, 0x17, 0xf4, 0x97, 0x94, 0x8d,
0x30, 0x5c, 0xc9, 0xaf, 0x5d, 0xd0, 0x5f, 0x61, 0x88, 0x12, 0x57, 0x41, 0xf7, 0xbe, 0x57, 0x01,
0x72, 0x65, 0xe4, 0x9b, 0x06, 0xb7, 0xfe, 0x15, 0x4b, 0x76, 0xca, 0x04, 0x95, 0xde, 0x88, 0x69,
0xaf, 0x02, 0x4d, 0xbd, 0xb3, 0xda, 0x5f, 0x7f, 0xff, 0xf9, 0x51, 0xd9, 0x22, 0x9b, 0xee, 0xc7,
0xa7, 0xee, 0xe7, 0x64, 0x96, 0x7e, 0x6e, 0xcd, 0x15, 0x39, 0x87, 0x1b, 0xc5, 0x05, 0x21, 0x8f,
0xcb, 0x5a, 0x97, 0xac, 0x90, 0xf9, 0x7f, 0xff, 0xad, 0x8e, 0xa2, 0x35, 0x89, 0x51, 0x46, 0xeb,
0xda, 0xf6, 0x15, 0xf9, 0x04, 0xad, 0x05, 0xff, 0x48, 0xa9, 0xae, 0x72, 0x93, 0x4d, 0xfd, 0xda,
0x2b, 0x78, 0x9d, 0x3c, 0xf1, 0x19, 0xb1, 0xbd, 0x9c, 0xf8, 0x8b, 0x06, 0xad, 0x85, 0x7b, 0x2e,
0x67, 0x2e, 0x5f, 0x86, 0xa5, 0xcc, 0xb6, 0x62, 0x7e, 0x68, 0xdd, 0x5f, 0xc6, 0xbc, 0x37, 0x52,
0x0d, 0xf7, 0x34, 0x7b, 0x30, 0x05, 0x7d, 0xc4, 0xc7, 0x25, 0xa4, 0x83, 0x56, 0x7e, 0x87, 0xc3,
0xa4, 0xff, 0x50, 0x7b, 0xd7, 0xcf, 0x60, 0x3e, 0x0f, 0x29, 0xf3, 0x1d, 0x2e, 0x7c, 0xd7, 0x47,
0xa6, 0xd8, 0xdd, 0x34, 0x45, 0xa3, 0x20, 0x2e, 0xfe, 0x5d, 0x9f, 0x17, 0xce, 0x3f, 0x2b, 0x64,
0x3f, 0xad, 0x3f, 0xe4, 0xcc, 0xf7, 0xd2, 0xe0, 0x49, 0x5d, 0x95, 0x3f, 0xfb, 0x1b, 0x00, 0x00,
0xff, 0xff, 0x69, 0x4c, 0xa6, 0x3e, 0x9b, 0x05, 0x00, 0x00,
}
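
The sketch below is illustrative only and is not part of the vendored file: it polls a long-running operation by name with the generated OperationsClient, as the GetOperation documentation above suggests. The dial target, the missing TLS/OAuth credentials, the operation name, and the poll interval are placeholder assumptions.

package main

import (
    "log"
    "time"

    "golang.org/x/net/context"
    longrunning "google.golang.org/genproto/googleapis/longrunning"
    "google.golang.org/grpc"
)

func main() {
    // Placeholder endpoint; real use requires TLS and OAuth credentials.
    conn, err := grpc.Dial("localhost:8086", grpc.WithInsecure())
    if err != nil {
        log.Fatalf("dialing: %v", err)
    }
    defer conn.Close()

    client := longrunning.NewOperationsClient(conn)
    ctx := context.Background()
    for {
        op, err := client.GetOperation(ctx, &longrunning.GetOperationRequest{
            Name: "operations/some/unique/name", // placeholder operation name
        })
        if err != nil {
            log.Fatalf("GetOperation: %v", err)
        }
        if op.Done {
            // Exactly one of error or response is set once the operation is done.
            if st := op.GetError(); st != nil {
                log.Fatalf("operation failed: code=%d %s", st.Code, st.Message)
            }
            log.Printf("operation succeeded: %v", op.GetResponse())
            return
        }
        time.Sleep(2 * time.Second) // poll at an interval the service recommends
    }
}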


@ -0,0 +1,143 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/rpc/status.proto
/*
Package status is a generated protocol buffer package.
It is generated from these files:
google/rpc/status.proto
It has these top-level messages:
Status
*/
package status
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "github.com/golang/protobuf/ptypes/any"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// The `Status` type defines a logical error model that is suitable for different
// programming environments, including REST APIs and RPC APIs. It is used by
// [gRPC](https://github.com/grpc). The error model is designed to be:
//
// - Simple to use and understand for most users
// - Flexible enough to meet unexpected needs
//
// # Overview
//
// The `Status` message contains three pieces of data: error code, error message,
// and error details. The error code should be an enum value of
// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The
// error message should be a developer-facing English message that helps
// developers *understand* and *resolve* the error. If a localized user-facing
// error message is needed, put the localized message in the error details or
// localize it in the client. The optional error details may contain arbitrary
// information about the error. There is a predefined set of error detail types
// in the package `google.rpc` which can be used for common error conditions.
//
// # Language mapping
//
// The `Status` message is the logical representation of the error model, but it
// is not necessarily the actual wire format. When the `Status` message is
// exposed in different client libraries and different wire protocols, it can be
// mapped differently. For example, it will likely be mapped to some exceptions
// in Java, but more likely mapped to some error codes in C.
//
// # Other uses
//
// The error model and the `Status` message can be used in a variety of
// environments, either with or without APIs, to provide a
// consistent developer experience across different environments.
//
// Example uses of this error model include:
//
// - Partial errors. If a service needs to return partial errors to the client,
// it may embed the `Status` in the normal response to indicate the partial
// errors.
//
// - Workflow errors. A typical workflow has multiple steps. Each step may
// have a `Status` message for error reporting purpose.
//
// - Batch operations. If a client uses batch request and batch response, the
// `Status` message should be used directly inside batch response, one for
// each error sub-response.
//
// - Asynchronous operations. If an API call embeds asynchronous operation
// results in its response, the status of those operations should be
// represented directly using the `Status` message.
//
// - Logging. If some API errors are stored in logs, the message `Status` could
// be used directly after any stripping needed for security/privacy reasons.
type Status struct {
// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"`
// A developer-facing error message, which should be in English. Any
// user-facing error message should be localized and sent in the
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"`
// A list of messages that carry the error details. There will be a
// common set of message types for APIs to use.
Details []*google_protobuf.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"`
}
func (m *Status) Reset() { *m = Status{} }
func (m *Status) String() string { return proto.CompactTextString(m) }
func (*Status) ProtoMessage() {}
func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Status) GetCode() int32 {
if m != nil {
return m.Code
}
return 0
}
func (m *Status) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *Status) GetDetails() []*google_protobuf.Any {
if m != nil {
return m.Details
}
return nil
}
func init() {
proto.RegisterType((*Status)(nil), "google.rpc.Status")
}
func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 209 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28,
0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81,
0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1,
0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83,
0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05,
0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7,
0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7,
0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c,
0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12,
0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12,
0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1,
0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00,
0x00,
}
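To make the generated `Status` type above concrete, here is a minimal sketch of constructing a `Status` with a detail message packed as an `Any`. It assumes the package is importable at the conventional `google.golang.org/genproto/googleapis/rpc/status` path (the vendored path header for this file is not shown above) and uses only `ptypes.MarshalAny` and `proto.MarshalTextString` from the protobuf runtime; it is an illustration, not part of the vendored code.

```
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	anypb "github.com/golang/protobuf/ptypes/any"
	spb "google.golang.org/genproto/googleapis/rpc/status" // assumed vendored import path
)

func main() {
	// Pack an arbitrary proto message into Details; another Status is reused
	// here purely as a stand-in detail payload.
	detail, err := ptypes.MarshalAny(&spb.Status{Message: "db shard 3 unreachable"})
	if err != nil {
		log.Fatal(err)
	}
	st := &spb.Status{
		Code:    14, // google.rpc.Code value for UNAVAILABLE
		Message: "the service is temporarily unavailable",
		Details: []*anypb.Any{detail},
	}
	fmt.Println(proto.MarshalTextString(st))
}
```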

View File

@@ -0,0 +1,343 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: src/google/protobuf/api.proto
/*
Package api is a generated protocol buffer package.
It is generated from these files:
src/google/protobuf/api.proto
It has these top-level messages:
Api
Method
Mixin
*/
package api
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "google.golang.org/genproto/protobuf/source_context"
import google_protobuf2 "google.golang.org/genproto/protobuf/ptype"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Api is a light-weight descriptor for a protocol buffer service.
type Api struct {
// The fully qualified name of this api, including package name
// followed by the api's simple name.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// The methods of this api, in unspecified order.
Methods []*Method `protobuf:"bytes,2,rep,name=methods" json:"methods,omitempty"`
// Any metadata attached to the API.
Options []*google_protobuf2.Option `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"`
// A version string for this api. If specified, must have the form
// `major-version.minor-version`, as in `1.10`. If the minor version
// is omitted, it defaults to zero. If the entire version field is
// empty, the major version is derived from the package name, as
// outlined below. If the field is not empty, the version in the
// package name will be verified to be consistent with what is
// provided here.
//
// The versioning schema uses [semantic
// versioning](http://semver.org) where the major version number
// indicates a breaking change and the minor version an additive,
// non-breaking change. Both version numbers are signals to users
// what to expect from different versions, and should be carefully
// chosen based on the product plan.
//
// The major version is also reflected in the package name of the
// API, which must end in `v<major-version>`, as in
// `google.feature.v1`. For major versions 0 and 1, the suffix can
// be omitted. Zero major versions must only be used for
// experimental, non-GA APIs.
//
//
Version string `protobuf:"bytes,4,opt,name=version" json:"version,omitempty"`
// Source context for the protocol buffer service represented by this
// message.
SourceContext *google_protobuf.SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext" json:"source_context,omitempty"`
// Included APIs. See [Mixin][].
Mixins []*Mixin `protobuf:"bytes,6,rep,name=mixins" json:"mixins,omitempty"`
// The source syntax of the service.
Syntax google_protobuf2.Syntax `protobuf:"varint,7,opt,name=syntax,enum=google.protobuf.Syntax" json:"syntax,omitempty"`
}
func (m *Api) Reset() { *m = Api{} }
func (m *Api) String() string { return proto.CompactTextString(m) }
func (*Api) ProtoMessage() {}
func (*Api) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Api) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Api) GetMethods() []*Method {
if m != nil {
return m.Methods
}
return nil
}
func (m *Api) GetOptions() []*google_protobuf2.Option {
if m != nil {
return m.Options
}
return nil
}
func (m *Api) GetVersion() string {
if m != nil {
return m.Version
}
return ""
}
func (m *Api) GetSourceContext() *google_protobuf.SourceContext {
if m != nil {
return m.SourceContext
}
return nil
}
func (m *Api) GetMixins() []*Mixin {
if m != nil {
return m.Mixins
}
return nil
}
func (m *Api) GetSyntax() google_protobuf2.Syntax {
if m != nil {
return m.Syntax
}
return google_protobuf2.Syntax_SYNTAX_PROTO2
}
// Method represents a method of an api.
type Method struct {
// The simple name of this method.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// A URL of the input message type.
RequestTypeUrl string `protobuf:"bytes,2,opt,name=request_type_url,json=requestTypeUrl" json:"request_type_url,omitempty"`
// If true, the request is streamed.
RequestStreaming bool `protobuf:"varint,3,opt,name=request_streaming,json=requestStreaming" json:"request_streaming,omitempty"`
// The URL of the output message type.
ResponseTypeUrl string `protobuf:"bytes,4,opt,name=response_type_url,json=responseTypeUrl" json:"response_type_url,omitempty"`
// If true, the response is streamed.
ResponseStreaming bool `protobuf:"varint,5,opt,name=response_streaming,json=responseStreaming" json:"response_streaming,omitempty"`
// Any metadata attached to the method.
Options []*google_protobuf2.Option `protobuf:"bytes,6,rep,name=options" json:"options,omitempty"`
// The source syntax of this method.
Syntax google_protobuf2.Syntax `protobuf:"varint,7,opt,name=syntax,enum=google.protobuf.Syntax" json:"syntax,omitempty"`
}
func (m *Method) Reset() { *m = Method{} }
func (m *Method) String() string { return proto.CompactTextString(m) }
func (*Method) ProtoMessage() {}
func (*Method) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *Method) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Method) GetRequestTypeUrl() string {
if m != nil {
return m.RequestTypeUrl
}
return ""
}
func (m *Method) GetRequestStreaming() bool {
if m != nil {
return m.RequestStreaming
}
return false
}
func (m *Method) GetResponseTypeUrl() string {
if m != nil {
return m.ResponseTypeUrl
}
return ""
}
func (m *Method) GetResponseStreaming() bool {
if m != nil {
return m.ResponseStreaming
}
return false
}
func (m *Method) GetOptions() []*google_protobuf2.Option {
if m != nil {
return m.Options
}
return nil
}
func (m *Method) GetSyntax() google_protobuf2.Syntax {
if m != nil {
return m.Syntax
}
return google_protobuf2.Syntax_SYNTAX_PROTO2
}
// Declares an API to be included in this API. The including API must
// redeclare all the methods from the included API, but documentation
// and options are inherited as follows:
//
// - If after comment and whitespace stripping, the documentation
// string of the redeclared method is empty, it will be inherited
// from the original method.
//
// - Each annotation belonging to the service config (http,
// visibility) which is not set in the redeclared method will be
// inherited.
//
// - If an http annotation is inherited, the path pattern will be
// modified as follows. Any version prefix will be replaced by the
// version of the including API plus the [root][] path if specified.
//
// Example of a simple mixin:
//
// package google.acl.v1;
// service AccessControl {
// // Get the underlying ACL object.
// rpc GetAcl(GetAclRequest) returns (Acl) {
// option (google.api.http).get = "/v1/{resource=**}:getAcl";
// }
// }
//
// package google.storage.v2;
// service Storage {
// rpc GetAcl(GetAclRequest) returns (Acl);
//
// // Get a data record.
// rpc GetData(GetDataRequest) returns (Data) {
// option (google.api.http).get = "/v2/{resource=**}";
// }
// }
//
// Example of a mixin configuration:
//
// apis:
// - name: google.storage.v2.Storage
// mixins:
// - name: google.acl.v1.AccessControl
//
// The mixin construct implies that all methods in `AccessControl` are
// also declared with same name and request/response types in
// `Storage`. A documentation generator or annotation processor will
// see the effective `Storage.GetAcl` method after inheriting
// documentation and annotations as follows:
//
// service Storage {
// // Get the underlying ACL object.
// rpc GetAcl(GetAclRequest) returns (Acl) {
// option (google.api.http).get = "/v2/{resource=**}:getAcl";
// }
// ...
// }
//
// Note how the version in the path pattern changed from `v1` to `v2`.
//
// If the `root` field in the mixin is specified, it should be a
// relative path under which inherited HTTP paths are placed. Example:
//
// apis:
// - name: google.storage.v2.Storage
// mixins:
// - name: google.acl.v1.AccessControl
// root: acls
//
// This implies the following inherited HTTP annotation:
//
// service Storage {
// // Get the underlying ACL object.
// rpc GetAcl(GetAclRequest) returns (Acl) {
// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl";
// }
// ...
// }
type Mixin struct {
// The fully qualified name of the API which is included.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// If non-empty specifies a path under which inherited HTTP paths
// are rooted.
Root string `protobuf:"bytes,2,opt,name=root" json:"root,omitempty"`
}
func (m *Mixin) Reset() { *m = Mixin{} }
func (m *Mixin) String() string { return proto.CompactTextString(m) }
func (*Mixin) ProtoMessage() {}
func (*Mixin) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Mixin) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Mixin) GetRoot() string {
if m != nil {
return m.Root
}
return ""
}
func init() {
proto.RegisterType((*Api)(nil), "google.protobuf.Api")
proto.RegisterType((*Method)(nil), "google.protobuf.Method")
proto.RegisterType((*Mixin)(nil), "google.protobuf.Mixin")
}
func init() { proto.RegisterFile("src/google/protobuf/api.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 435 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0x4f, 0x8b, 0xd3, 0x40,
0x18, 0xc6, 0x49, 0xd2, 0xa6, 0xeb, 0x2c, 0x76, 0x75, 0x04, 0x1d, 0x0a, 0x2e, 0x61, 0xf1, 0x10,
0x5c, 0x4c, 0x70, 0x3d, 0x7a, 0x6a, 0x45, 0x7a, 0x10, 0x31, 0xa4, 0x8a, 0xe0, 0xa5, 0xa4, 0x71,
0x8c, 0x03, 0xc9, 0xbc, 0xe3, 0xcc, 0x44, 0xdb, 0xaf, 0xe3, 0xd1, 0xa3, 0xdf, 0xc0, 0x6f, 0x26,
0x99, 0x64, 0xfa, 0x27, 0xad, 0xe0, 0xde, 0xe6, 0x9d, 0xe7, 0xf7, 0x3e, 0x79, 0xdf, 0x67, 0x08,
0x7a, 0xac, 0x64, 0x1e, 0x17, 0x00, 0x45, 0x49, 0x63, 0x21, 0x41, 0xc3, 0xaa, 0xfe, 0x12, 0x67,
0x82, 0x45, 0xa6, 0xc0, 0x17, 0xad, 0x14, 0x59, 0x69, 0xf2, 0xa4, 0xcf, 0x2a, 0xa8, 0x65, 0x4e,
0x97, 0x39, 0x70, 0x4d, 0xd7, 0xba, 0x05, 0x27, 0x93, 0x3e, 0xa5, 0x37, 0xa2, 0x33, 0xb9, 0xfa,
0xe3, 0x22, 0x6f, 0x2a, 0x18, 0xc6, 0x68, 0xc0, 0xb3, 0x8a, 0x12, 0x27, 0x70, 0xc2, 0x3b, 0xa9,
0x39, 0xe3, 0xe7, 0x68, 0x54, 0x51, 0xfd, 0x15, 0x3e, 0x2b, 0xe2, 0x06, 0x5e, 0x78, 0x7e, 0xf3,
0x28, 0xea, 0x0d, 0x10, 0xbd, 0x35, 0x7a, 0x6a, 0xb9, 0xa6, 0x05, 0x84, 0x66, 0xc0, 0x15, 0xf1,
0xfe, 0xd1, 0xf2, 0xce, 0xe8, 0xa9, 0xe5, 0x30, 0x41, 0xa3, 0xef, 0x54, 0x2a, 0x06, 0x9c, 0x0c,
0xcc, 0xc7, 0x6d, 0x89, 0x5f, 0xa3, 0xf1, 0xe1, 0x3e, 0x64, 0x18, 0x38, 0xe1, 0xf9, 0xcd, 0xe5,
0x91, 0xe7, 0xc2, 0x60, 0xaf, 0x5a, 0x2a, 0xbd, 0xab, 0xf6, 0x4b, 0x1c, 0x21, 0xbf, 0x62, 0x6b,
0xc6, 0x15, 0xf1, 0xcd, 0x48, 0x0f, 0x8f, 0xb7, 0x68, 0xe4, 0xb4, 0xa3, 0x70, 0x8c, 0x7c, 0xb5,
0xe1, 0x3a, 0x5b, 0x93, 0x51, 0xe0, 0x84, 0xe3, 0x13, 0x2b, 0x2c, 0x8c, 0x9c, 0x76, 0xd8, 0xd5,
0x6f, 0x17, 0xf9, 0x6d, 0x10, 0x27, 0x63, 0x0c, 0xd1, 0x3d, 0x49, 0xbf, 0xd5, 0x54, 0xe9, 0x65,
0x13, 0xfc, 0xb2, 0x96, 0x25, 0x71, 0x8d, 0x3e, 0xee, 0xee, 0xdf, 0x6f, 0x04, 0xfd, 0x20, 0x4b,
0x7c, 0x8d, 0xee, 0x5b, 0x52, 0x69, 0x49, 0xb3, 0x8a, 0xf1, 0x82, 0x78, 0x81, 0x13, 0x9e, 0xa5,
0xd6, 0x62, 0x61, 0xef, 0xf1, 0xd3, 0x06, 0x56, 0x02, 0xb8, 0xa2, 0x3b, 0xdf, 0x36, 0xc1, 0x0b,
0x2b, 0x58, 0xe3, 0x67, 0x08, 0x6f, 0xd9, 0x9d, 0xf3, 0xd0, 0x38, 0x6f, 0x5d, 0x76, 0xd6, 0x7b,
0xaf, 0xe8, 0xff, 0xe7, 0x2b, 0xde, 0x3a, 0xb4, 0x18, 0x0d, 0x4d, 0xec, 0x27, 0x23, 0xc3, 0x68,
0x20, 0x01, 0x74, 0x17, 0x93, 0x39, 0xcf, 0x6a, 0xf4, 0x20, 0x87, 0xaa, 0x6f, 0x3b, 0x3b, 0x9b,
0x0a, 0x96, 0x34, 0x45, 0xe2, 0x7c, 0xba, 0xee, 0xc4, 0x02, 0xca, 0x8c, 0x17, 0x11, 0xc8, 0x22,
0x2e, 0x28, 0x37, 0xe8, 0xc1, 0xef, 0xf4, 0x32, 0x13, 0xec, 0xa7, 0xeb, 0xcd, 0x93, 0xd9, 0x2f,
0xf7, 0x72, 0xde, 0xf6, 0x24, 0x76, 0xce, 0x8f, 0xb4, 0x2c, 0xdf, 0x70, 0xf8, 0xc1, 0x9b, 0xf0,
0xd4, 0xca, 0x37, 0x8d, 0x2f, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x42, 0xe4, 0x4b, 0x9b,
0x03, 0x00, 0x00,
}
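The `Api`, `Method`, and `Mixin` descriptors above can be built by hand as well as emitted by tooling. The sketch below mirrors the `AccessControl` example from the `Mixin` documentation; the `google.golang.org/genproto/protobuf/api` import path is an assumption (the other two paths appear in this file's imports), the type URLs are illustrative, and the snippet is not part of the vendored code.

```
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	apipb "google.golang.org/genproto/protobuf/api" // assumed import path for this package
	ptypepb "google.golang.org/genproto/protobuf/ptype"
)

func main() {
	// Describe a service with a single unary method, echoing the
	// google.acl.v1.AccessControl example from the Mixin docs above.
	a := &apipb.Api{
		Name:    "google.acl.v1.AccessControl",
		Version: "1.0",
		Methods: []*apipb.Method{{
			Name:            "GetAcl",
			RequestTypeUrl:  "type.googleapis.com/google.acl.v1.GetAclRequest", // illustrative type URL
			ResponseTypeUrl: "type.googleapis.com/google.acl.v1.Acl",           // illustrative type URL
			Syntax:          ptypepb.Syntax_SYNTAX_PROTO3,
		}},
		Syntax: ptypepb.Syntax_SYNTAX_PROTO3,
	}
	fmt.Println(proto.MarshalTextString(a))
}
```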

View File

@@ -0,0 +1,538 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: src/google/protobuf/type.proto
/*
Package ptype is a generated protocol buffer package.
It is generated from these files:
src/google/protobuf/type.proto
It has these top-level messages:
Type
Field
Enum
EnumValue
Option
*/
package ptype
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "github.com/golang/protobuf/ptypes/any"
import google_protobuf1 "google.golang.org/genproto/protobuf/source_context"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// The syntax in which a protocol buffer element is defined.
type Syntax int32
const (
// Syntax `proto2`.
Syntax_SYNTAX_PROTO2 Syntax = 0
// Syntax `proto3`.
Syntax_SYNTAX_PROTO3 Syntax = 1
)
var Syntax_name = map[int32]string{
0: "SYNTAX_PROTO2",
1: "SYNTAX_PROTO3",
}
var Syntax_value = map[string]int32{
"SYNTAX_PROTO2": 0,
"SYNTAX_PROTO3": 1,
}
func (x Syntax) String() string {
return proto.EnumName(Syntax_name, int32(x))
}
func (Syntax) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
// Basic field types.
type Field_Kind int32
const (
// Field type unknown.
Field_TYPE_UNKNOWN Field_Kind = 0
// Field type double.
Field_TYPE_DOUBLE Field_Kind = 1
// Field type float.
Field_TYPE_FLOAT Field_Kind = 2
// Field type int64.
Field_TYPE_INT64 Field_Kind = 3
// Field type uint64.
Field_TYPE_UINT64 Field_Kind = 4
// Field type int32.
Field_TYPE_INT32 Field_Kind = 5
// Field type fixed64.
Field_TYPE_FIXED64 Field_Kind = 6
// Field type fixed32.
Field_TYPE_FIXED32 Field_Kind = 7
// Field type bool.
Field_TYPE_BOOL Field_Kind = 8
// Field type string.
Field_TYPE_STRING Field_Kind = 9
// Field type group. Proto2 syntax only, and deprecated.
Field_TYPE_GROUP Field_Kind = 10
// Field type message.
Field_TYPE_MESSAGE Field_Kind = 11
// Field type bytes.
Field_TYPE_BYTES Field_Kind = 12
// Field type uint32.
Field_TYPE_UINT32 Field_Kind = 13
// Field type enum.
Field_TYPE_ENUM Field_Kind = 14
// Field type sfixed32.
Field_TYPE_SFIXED32 Field_Kind = 15
// Field type sfixed64.
Field_TYPE_SFIXED64 Field_Kind = 16
// Field type sint32.
Field_TYPE_SINT32 Field_Kind = 17
// Field type sint64.
Field_TYPE_SINT64 Field_Kind = 18
)
var Field_Kind_name = map[int32]string{
0: "TYPE_UNKNOWN",
1: "TYPE_DOUBLE",
2: "TYPE_FLOAT",
3: "TYPE_INT64",
4: "TYPE_UINT64",
5: "TYPE_INT32",
6: "TYPE_FIXED64",
7: "TYPE_FIXED32",
8: "TYPE_BOOL",
9: "TYPE_STRING",
10: "TYPE_GROUP",
11: "TYPE_MESSAGE",
12: "TYPE_BYTES",
13: "TYPE_UINT32",
14: "TYPE_ENUM",
15: "TYPE_SFIXED32",
16: "TYPE_SFIXED64",
17: "TYPE_SINT32",
18: "TYPE_SINT64",
}
var Field_Kind_value = map[string]int32{
"TYPE_UNKNOWN": 0,
"TYPE_DOUBLE": 1,
"TYPE_FLOAT": 2,
"TYPE_INT64": 3,
"TYPE_UINT64": 4,
"TYPE_INT32": 5,
"TYPE_FIXED64": 6,
"TYPE_FIXED32": 7,
"TYPE_BOOL": 8,
"TYPE_STRING": 9,
"TYPE_GROUP": 10,
"TYPE_MESSAGE": 11,
"TYPE_BYTES": 12,
"TYPE_UINT32": 13,
"TYPE_ENUM": 14,
"TYPE_SFIXED32": 15,
"TYPE_SFIXED64": 16,
"TYPE_SINT32": 17,
"TYPE_SINT64": 18,
}
func (x Field_Kind) String() string {
return proto.EnumName(Field_Kind_name, int32(x))
}
func (Field_Kind) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} }
// Whether a field is optional, required, or repeated.
type Field_Cardinality int32
const (
// For fields with unknown cardinality.
Field_CARDINALITY_UNKNOWN Field_Cardinality = 0
// For optional fields.
Field_CARDINALITY_OPTIONAL Field_Cardinality = 1
// For required fields. Proto2 syntax only.
Field_CARDINALITY_REQUIRED Field_Cardinality = 2
// For repeated fields.
Field_CARDINALITY_REPEATED Field_Cardinality = 3
)
var Field_Cardinality_name = map[int32]string{
0: "CARDINALITY_UNKNOWN",
1: "CARDINALITY_OPTIONAL",
2: "CARDINALITY_REQUIRED",
3: "CARDINALITY_REPEATED",
}
var Field_Cardinality_value = map[string]int32{
"CARDINALITY_UNKNOWN": 0,
"CARDINALITY_OPTIONAL": 1,
"CARDINALITY_REQUIRED": 2,
"CARDINALITY_REPEATED": 3,
}
func (x Field_Cardinality) String() string {
return proto.EnumName(Field_Cardinality_name, int32(x))
}
func (Field_Cardinality) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 1} }
// A protocol buffer message type.
type Type struct {
// The fully qualified message name.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// The list of fields.
Fields []*Field `protobuf:"bytes,2,rep,name=fields" json:"fields,omitempty"`
// The list of types appearing in `oneof` definitions in this type.
Oneofs []string `protobuf:"bytes,3,rep,name=oneofs" json:"oneofs,omitempty"`
// The protocol buffer options.
Options []*Option `protobuf:"bytes,4,rep,name=options" json:"options,omitempty"`
// The source context.
SourceContext *google_protobuf1.SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext" json:"source_context,omitempty"`
// The source syntax.
Syntax Syntax `protobuf:"varint,6,opt,name=syntax,enum=google.protobuf.Syntax" json:"syntax,omitempty"`
}
func (m *Type) Reset() { *m = Type{} }
func (m *Type) String() string { return proto.CompactTextString(m) }
func (*Type) ProtoMessage() {}
func (*Type) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Type) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Type) GetFields() []*Field {
if m != nil {
return m.Fields
}
return nil
}
func (m *Type) GetOneofs() []string {
if m != nil {
return m.Oneofs
}
return nil
}
func (m *Type) GetOptions() []*Option {
if m != nil {
return m.Options
}
return nil
}
func (m *Type) GetSourceContext() *google_protobuf1.SourceContext {
if m != nil {
return m.SourceContext
}
return nil
}
func (m *Type) GetSyntax() Syntax {
if m != nil {
return m.Syntax
}
return Syntax_SYNTAX_PROTO2
}
// A single field of a message type.
type Field struct {
// The field type.
Kind Field_Kind `protobuf:"varint,1,opt,name=kind,enum=google.protobuf.Field_Kind" json:"kind,omitempty"`
// The field cardinality.
Cardinality Field_Cardinality `protobuf:"varint,2,opt,name=cardinality,enum=google.protobuf.Field_Cardinality" json:"cardinality,omitempty"`
// The field number.
Number int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
// The field name.
Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
// The field type URL, without the scheme, for message or enumeration
// types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`.
TypeUrl string `protobuf:"bytes,6,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
// The index of the field type in `Type.oneofs`, for message or enumeration
// types. The first type has index 1; zero means the type is not in the list.
OneofIndex int32 `protobuf:"varint,7,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"`
// Whether to use alternative packed wire representation.
Packed bool `protobuf:"varint,8,opt,name=packed" json:"packed,omitempty"`
// The protocol buffer options.
Options []*Option `protobuf:"bytes,9,rep,name=options" json:"options,omitempty"`
// The field JSON name.
JsonName string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
// The string value of the default value of this field. Proto2 syntax only.
DefaultValue string `protobuf:"bytes,11,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
}
func (m *Field) Reset() { *m = Field{} }
func (m *Field) String() string { return proto.CompactTextString(m) }
func (*Field) ProtoMessage() {}
func (*Field) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *Field) GetKind() Field_Kind {
if m != nil {
return m.Kind
}
return Field_TYPE_UNKNOWN
}
func (m *Field) GetCardinality() Field_Cardinality {
if m != nil {
return m.Cardinality
}
return Field_CARDINALITY_UNKNOWN
}
func (m *Field) GetNumber() int32 {
if m != nil {
return m.Number
}
return 0
}
func (m *Field) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Field) GetTypeUrl() string {
if m != nil {
return m.TypeUrl
}
return ""
}
func (m *Field) GetOneofIndex() int32 {
if m != nil {
return m.OneofIndex
}
return 0
}
func (m *Field) GetPacked() bool {
if m != nil {
return m.Packed
}
return false
}
func (m *Field) GetOptions() []*Option {
if m != nil {
return m.Options
}
return nil
}
func (m *Field) GetJsonName() string {
if m != nil {
return m.JsonName
}
return ""
}
func (m *Field) GetDefaultValue() string {
if m != nil {
return m.DefaultValue
}
return ""
}
// Enum type definition.
type Enum struct {
// Enum type name.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// Enum value definitions.
Enumvalue []*EnumValue `protobuf:"bytes,2,rep,name=enumvalue" json:"enumvalue,omitempty"`
// Protocol buffer options.
Options []*Option `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"`
// The source context.
SourceContext *google_protobuf1.SourceContext `protobuf:"bytes,4,opt,name=source_context,json=sourceContext" json:"source_context,omitempty"`
// The source syntax.
Syntax Syntax `protobuf:"varint,5,opt,name=syntax,enum=google.protobuf.Syntax" json:"syntax,omitempty"`
}
func (m *Enum) Reset() { *m = Enum{} }
func (m *Enum) String() string { return proto.CompactTextString(m) }
func (*Enum) ProtoMessage() {}
func (*Enum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Enum) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Enum) GetEnumvalue() []*EnumValue {
if m != nil {
return m.Enumvalue
}
return nil
}
func (m *Enum) GetOptions() []*Option {
if m != nil {
return m.Options
}
return nil
}
func (m *Enum) GetSourceContext() *google_protobuf1.SourceContext {
if m != nil {
return m.SourceContext
}
return nil
}
func (m *Enum) GetSyntax() Syntax {
if m != nil {
return m.Syntax
}
return Syntax_SYNTAX_PROTO2
}
// Enum value definition.
type EnumValue struct {
// Enum value name.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// Enum value number.
Number int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
// Protocol buffer options.
Options []*Option `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"`
}
func (m *EnumValue) Reset() { *m = EnumValue{} }
func (m *EnumValue) String() string { return proto.CompactTextString(m) }
func (*EnumValue) ProtoMessage() {}
func (*EnumValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *EnumValue) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *EnumValue) GetNumber() int32 {
if m != nil {
return m.Number
}
return 0
}
func (m *EnumValue) GetOptions() []*Option {
if m != nil {
return m.Options
}
return nil
}
// A protocol buffer option, which can be attached to a message, field,
// enumeration, etc.
type Option struct {
// The option's name. For protobuf built-in options (options defined in
// descriptor.proto), this is the short name. For example, `"map_entry"`.
// For custom options, it should be the fully-qualified name. For example,
// `"google.api.http"`.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// The option's value packed in an Any message. If the value is a primitive,
// the corresponding wrapper type defined in google/protobuf/wrappers.proto
// should be used. If the value is an enum, it should be stored as an int32
// value using the google.protobuf.Int32Value type.
Value *google_protobuf.Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
}
func (m *Option) Reset() { *m = Option{} }
func (m *Option) String() string { return proto.CompactTextString(m) }
func (*Option) ProtoMessage() {}
func (*Option) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *Option) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Option) GetValue() *google_protobuf.Any {
if m != nil {
return m.Value
}
return nil
}
func init() {
proto.RegisterType((*Type)(nil), "google.protobuf.Type")
proto.RegisterType((*Field)(nil), "google.protobuf.Field")
proto.RegisterType((*Enum)(nil), "google.protobuf.Enum")
proto.RegisterType((*EnumValue)(nil), "google.protobuf.EnumValue")
proto.RegisterType((*Option)(nil), "google.protobuf.Option")
proto.RegisterEnum("google.protobuf.Syntax", Syntax_name, Syntax_value)
proto.RegisterEnum("google.protobuf.Field_Kind", Field_Kind_name, Field_Kind_value)
proto.RegisterEnum("google.protobuf.Field_Cardinality", Field_Cardinality_name, Field_Cardinality_value)
}
func init() { proto.RegisterFile("src/google/protobuf/type.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 813 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4f, 0x8f, 0xda, 0x46,
0x14, 0x8f, 0x8d, 0xf1, 0xe2, 0xc7, 0xc2, 0x4e, 0x26, 0x51, 0xe2, 0x6c, 0xa4, 0x2d, 0xa2, 0x3d,
0xa0, 0x1c, 0x8c, 0x0a, 0xab, 0x55, 0xa5, 0x9e, 0x60, 0xf1, 0x52, 0xb4, 0xc4, 0x76, 0x07, 0xd3,
0x64, 0x7b, 0x41, 0x5e, 0x98, 0x45, 0x24, 0x66, 0x8c, 0xb0, 0xdd, 0x2e, 0x87, 0x7e, 0x84, 0x7e,
0x89, 0x1e, 0x7b, 0xee, 0x87, 0xe8, 0x47, 0xea, 0xad, 0xd5, 0x8c, 0xc1, 0x98, 0x3f, 0x95, 0xd2,
0xe6, 0x82, 0x78, 0xbf, 0xf7, 0x7b, 0xff, 0x9f, 0xdf, 0xc0, 0x45, 0xb8, 0x1c, 0xd7, 0xa7, 0x41,
0x30, 0xf5, 0x69, 0x7d, 0xb1, 0x0c, 0xa2, 0xe0, 0x3e, 0x7e, 0xa8, 0x47, 0xab, 0x05, 0x35, 0x84,
0x84, 0xcf, 0x12, 0x9d, 0xb1, 0xd1, 0x9d, 0xbf, 0xda, 0x27, 0x7b, 0x6c, 0x95, 0x68, 0xcf, 0xbf,
0xda, 0x57, 0x85, 0x41, 0xbc, 0x1c, 0xd3, 0xd1, 0x38, 0x60, 0x11, 0x7d, 0x8c, 0x12, 0x56, 0xf5,
0x57, 0x19, 0x14, 0x77, 0xb5, 0xa0, 0x18, 0x83, 0xc2, 0xbc, 0x39, 0xd5, 0xa5, 0x8a, 0x54, 0xd3,
0x88, 0xf8, 0x8f, 0x0d, 0x50, 0x1f, 0x66, 0xd4, 0x9f, 0x84, 0xba, 0x5c, 0xc9, 0xd5, 0x8a, 0x8d,
0x17, 0xc6, 0x5e, 0x7c, 0xe3, 0x86, 0xab, 0xc9, 0x9a, 0x85, 0x5f, 0x80, 0x1a, 0x30, 0x1a, 0x3c,
0x84, 0x7a, 0xae, 0x92, 0xab, 0x69, 0x64, 0x2d, 0xe1, 0xaf, 0xe1, 0x24, 0x58, 0x44, 0xb3, 0x80,
0x85, 0xba, 0x22, 0x1c, 0xbd, 0x3c, 0x70, 0x64, 0x0b, 0x3d, 0xd9, 0xf0, 0xb0, 0x09, 0xe5, 0xdd,
0x7c, 0xf5, 0x7c, 0x45, 0xaa, 0x15, 0x1b, 0x17, 0x07, 0x96, 0x03, 0x41, 0xbb, 0x4e, 0x58, 0xa4,
0x14, 0x66, 0x45, 0x5c, 0x07, 0x35, 0x5c, 0xb1, 0xc8, 0x7b, 0xd4, 0xd5, 0x8a, 0x54, 0x2b, 0x1f,
0x09, 0x3c, 0x10, 0x6a, 0xb2, 0xa6, 0x55, 0xff, 0x50, 0x21, 0x2f, 0x8a, 0xc2, 0x75, 0x50, 0x3e,
0xce, 0xd8, 0x44, 0x34, 0xa4, 0xdc, 0x78, 0x7d, 0xbc, 0x74, 0xe3, 0x76, 0xc6, 0x26, 0x44, 0x10,
0x71, 0x07, 0x8a, 0x63, 0x6f, 0x39, 0x99, 0x31, 0xcf, 0x9f, 0x45, 0x2b, 0x5d, 0x16, 0x76, 0xd5,
0x7f, 0xb1, 0xbb, 0xde, 0x32, 0x49, 0xd6, 0x8c, 0xf7, 0x90, 0xc5, 0xf3, 0x7b, 0xba, 0xd4, 0x73,
0x15, 0xa9, 0x96, 0x27, 0x6b, 0x29, 0x9d, 0x8f, 0x92, 0x99, 0xcf, 0x2b, 0x28, 0xf0, 0xe5, 0x18,
0xc5, 0x4b, 0x5f, 0xd4, 0xa7, 0x91, 0x13, 0x2e, 0x0f, 0x97, 0x3e, 0xfe, 0x02, 0x8a, 0xa2, 0xf9,
0xa3, 0x19, 0x9b, 0xd0, 0x47, 0xfd, 0x44, 0xf8, 0x02, 0x01, 0xf5, 0x38, 0xc2, 0xe3, 0x2c, 0xbc,
0xf1, 0x47, 0x3a, 0xd1, 0x0b, 0x15, 0xa9, 0x56, 0x20, 0x6b, 0x29, 0x3b, 0x2b, 0xed, 0x13, 0x67,
0xf5, 0x1a, 0xb4, 0x0f, 0x61, 0xc0, 0x46, 0x22, 0x3f, 0x10, 0x79, 0x14, 0x38, 0x60, 0xf1, 0x1c,
0xbf, 0x84, 0xd2, 0x84, 0x3e, 0x78, 0xb1, 0x1f, 0x8d, 0x7e, 0xf2, 0xfc, 0x98, 0xea, 0x45, 0x41,
0x38, 0x5d, 0x83, 0x3f, 0x70, 0xac, 0xfa, 0xa7, 0x0c, 0x0a, 0xef, 0x24, 0x46, 0x70, 0xea, 0xde,
0x39, 0xe6, 0x68, 0x68, 0xdd, 0x5a, 0xf6, 0x3b, 0x0b, 0x3d, 0xc1, 0x67, 0x50, 0x14, 0x48, 0xc7,
0x1e, 0xb6, 0xfb, 0x26, 0x92, 0x70, 0x19, 0x40, 0x00, 0x37, 0x7d, 0xbb, 0xe5, 0x22, 0x39, 0x95,
0x7b, 0x96, 0x7b, 0x75, 0x89, 0x72, 0xa9, 0xc1, 0x30, 0x01, 0x94, 0x2c, 0xa1, 0xd9, 0x40, 0xf9,
0x34, 0xc6, 0x4d, 0xef, 0xbd, 0xd9, 0xb9, 0xba, 0x44, 0xea, 0x2e, 0xd2, 0x6c, 0xa0, 0x13, 0x5c,
0x02, 0x4d, 0x20, 0x6d, 0xdb, 0xee, 0xa3, 0x42, 0xea, 0x73, 0xe0, 0x92, 0x9e, 0xd5, 0x45, 0x5a,
0xea, 0xb3, 0x4b, 0xec, 0xa1, 0x83, 0x20, 0xf5, 0xf0, 0xd6, 0x1c, 0x0c, 0x5a, 0x5d, 0x13, 0x15,
0x53, 0x46, 0xfb, 0xce, 0x35, 0x07, 0xe8, 0x74, 0x27, 0xad, 0x66, 0x03, 0x95, 0xd2, 0x10, 0xa6,
0x35, 0x7c, 0x8b, 0xca, 0xf8, 0x29, 0x94, 0x92, 0x10, 0x9b, 0x24, 0xce, 0xf6, 0xa0, 0xab, 0x4b,
0x84, 0xb6, 0x89, 0x24, 0x5e, 0x9e, 0xee, 0x00, 0x57, 0x97, 0x08, 0x57, 0x23, 0x28, 0x66, 0x76,
0x0b, 0xbf, 0x84, 0x67, 0xd7, 0x2d, 0xd2, 0xe9, 0x59, 0xad, 0x7e, 0xcf, 0xbd, 0xcb, 0xf4, 0x55,
0x87, 0xe7, 0x59, 0x85, 0xed, 0xb8, 0x3d, 0xdb, 0x6a, 0xf5, 0x91, 0xb4, 0xaf, 0x21, 0xe6, 0xf7,
0xc3, 0x1e, 0x31, 0x3b, 0x48, 0x3e, 0xd4, 0x38, 0x66, 0xcb, 0x35, 0x3b, 0x28, 0x57, 0xfd, 0x5b,
0x02, 0xc5, 0x64, 0xf1, 0xfc, 0xe8, 0x19, 0xf9, 0x06, 0x34, 0xca, 0xe2, 0x79, 0x32, 0xfe, 0xe4,
0x92, 0x9c, 0x1f, 0x2c, 0x15, 0xb7, 0x16, 0xcb, 0x40, 0xb6, 0xe4, 0xec, 0x32, 0xe6, 0xfe, 0xf7,
0xe1, 0x50, 0x3e, 0xef, 0x70, 0xe4, 0x3f, 0xed, 0x70, 0x7c, 0x00, 0x2d, 0x2d, 0xe1, 0x68, 0x17,
0xb6, 0x1f, 0xb6, 0xbc, 0xf3, 0x61, 0xff, 0xf7, 0x1a, 0xab, 0xdf, 0x81, 0x9a, 0x40, 0x47, 0x03,
0xbd, 0x81, 0xfc, 0xa6, 0xd5, 0xbc, 0xf0, 0xe7, 0x07, 0xee, 0x5a, 0x6c, 0x45, 0x12, 0xca, 0x1b,
0x03, 0xd4, 0xa4, 0x0e, 0xbe, 0x6c, 0x83, 0x3b, 0xcb, 0x6d, 0xbd, 0x1f, 0x39, 0xc4, 0x76, 0xed,
0x06, 0x7a, 0xb2, 0x0f, 0x35, 0x91, 0xd4, 0xfe, 0x05, 0x9e, 0x8d, 0x83, 0xf9, 0xbe, 0xc7, 0xb6,
0xc6, 0x9f, 0x10, 0x87, 0x4b, 0x8e, 0xf4, 0xe3, 0xfa, 0x01, 0x33, 0xa6, 0x81, 0xef, 0xb1, 0xa9,
0x11, 0x2c, 0xa7, 0xf5, 0x29, 0x65, 0x82, 0xbb, 0x7d, 0x8c, 0x16, 0xfc, 0x50, 0x7d, 0x2b, 0x7e,
0xff, 0x92, 0xa4, 0xdf, 0xe4, 0x5c, 0xd7, 0x69, 0xff, 0x2e, 0x5f, 0x74, 0x13, 0x53, 0x67, 0x93,
0xea, 0x3b, 0xea, 0xfb, 0xb7, 0x2c, 0xf8, 0x99, 0xf1, 0x00, 0xe1, 0xbd, 0x2a, 0xec, 0x9b, 0xff,
0x04, 0x00, 0x00, 0xff, 0xff, 0xe4, 0x0a, 0x14, 0x97, 0x28, 0x07, 0x00, 0x00,
}
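As a worked example of the `Type` and `Field` descriptors defined above, the sketch below hand-builds the description of a one-field message. The `google.golang.org/genproto/protobuf/ptype` import path comes from the imports of the previous file; the example is illustrative only and not part of the vendored code.

```
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	ptypepb "google.golang.org/genproto/protobuf/ptype"
)

func main() {
	// A hand-built description of `message User { string name = 1; }`.
	t := &ptypepb.Type{
		Name: "example.User",
		Fields: []*ptypepb.Field{{
			Kind:        ptypepb.Field_TYPE_STRING,
			Cardinality: ptypepb.Field_CARDINALITY_OPTIONAL,
			Number:      1,
			Name:        "name",
			JsonName:    "name",
		}},
		Syntax: ptypepb.Syntax_SYNTAX_PROTO3,
	}
	fmt.Println(proto.MarshalTextString(t))
}
```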

View File

@@ -0,0 +1,70 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: src/google/protobuf/source_context.proto
/*
Package source_context is a generated protocol buffer package.
It is generated from these files:
src/google/protobuf/source_context.proto
It has these top-level messages:
SourceContext
*/
package source_context
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// `SourceContext` represents information about the source of a
// protobuf element, like the file in which it is defined.
type SourceContext struct {
// The path-qualified name of the .proto file that contained the associated
// protobuf element. For example: `"google/protobuf/source_context.proto"`.
FileName string `protobuf:"bytes,1,opt,name=file_name,json=fileName" json:"file_name,omitempty"`
}
func (m *SourceContext) Reset() { *m = SourceContext{} }
func (m *SourceContext) String() string { return proto.CompactTextString(m) }
func (*SourceContext) ProtoMessage() {}
func (*SourceContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *SourceContext) GetFileName() string {
if m != nil {
return m.FileName
}
return ""
}
func init() {
proto.RegisterType((*SourceContext)(nil), "google.protobuf.SourceContext")
}
func init() { proto.RegisterFile("src/google/protobuf/source_context.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 188 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x28, 0x2e, 0x4a, 0xd6,
0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3,
0x2f, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x8d, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x03,
0x8b, 0x0b, 0xf1, 0x43, 0x54, 0xe9, 0xc1, 0x54, 0x29, 0xe9, 0x70, 0xf1, 0x06, 0x83, 0x15, 0x3a,
0x43, 0xd4, 0x09, 0x49, 0x73, 0x71, 0xa6, 0x65, 0xe6, 0xa4, 0xc6, 0xe7, 0x25, 0xe6, 0xa6, 0x4a,
0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x71, 0x80, 0x04, 0xfc, 0x12, 0x73, 0x53, 0x9d, 0xa6, 0x32,
0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x99, 0xe2, 0x24, 0x84, 0x62, 0x46, 0x00, 0x48, 0x38,
0x80, 0x31, 0xca, 0x11, 0xaa, 0x2c, 0x3d, 0x3f, 0x27, 0x31, 0x2f, 0x5d, 0x2f, 0xbf, 0x28, 0x5d,
0x3f, 0x3d, 0x35, 0x0f, 0xac, 0x09, 0x97, 0x33, 0xad, 0x51, 0xb9, 0x8b, 0x98, 0x98, 0xdd, 0x03,
0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0a, 0x80, 0xea, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1,
0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0xa9, 0x2c, 0x48, 0x2d, 0x4e, 0x62, 0x03, 0x1b, 0x67, 0x0c, 0x08,
0x00, 0x00, 0xff, 0xff, 0xc7, 0xbc, 0xab, 0x7f, 0x09, 0x01, 0x00, 0x00,
}
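`SourceContext` is the smallest of these descriptors: it only records which `.proto` file an element came from. A tiny illustrative sketch, using the `google.golang.org/genproto/protobuf/source_context` import path already referenced by the files above:

```
package main

import (
	"fmt"

	scpb "google.golang.org/genproto/protobuf/source_context"
)

func main() {
	// Record which .proto file a descriptor element was defined in.
	sc := &scpb.SourceContext{FileName: "google/protobuf/source_context.proto"}
	fmt.Println(sc.GetFileName())
}
```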

1
vendor/google.golang.org/grpc/AUTHORS generated vendored Normal file
View File

@@ -0,0 +1 @@
Google Inc.

View File

@@ -1,46 +1,32 @@
# How to contribute
We definitely welcome patches and contribution to grpc! Here are some guidelines
and information about how to do so.
We definitely welcome your patches and contributions to gRPC!
## Sending patches
### Getting started
1. Check out the code:
$ go get google.golang.org/grpc
$ cd $GOPATH/src/google.golang.org/grpc
1. Create a fork of the grpc-go repository.
1. Add your fork as a remote:
$ git remote add fork git@github.com:$YOURGITHUBUSERNAME/grpc-go.git
1. Make changes, commit them.
1. Run the test suite:
$ make test
1. Push your changes to your fork:
$ git push fork ...
1. Open a pull request.
If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
## Legal requirements
In order to protect both you and ourselves, you will need to sign the
[Contributor License Agreement](https://cla.developers.google.com/clas).
## Filing Issues
When filing an issue, make sure to answer these five questions:
## Guidelines for Pull Requests
How to get your contributions merged smoothly and quickly.
- Create **small PRs** that are narrowly focused on **addressing a single concern**. We often receive PRs that try to fix several things at a time; if only one of the fixes is acceptable, nothing gets merged, and both the author's and the reviewers' time is wasted. Create more PRs to address different concerns and everyone will be happy.
- For speculative changes, consider opening an issue and discussing it first. If you are suggesting a behavioral or API change, consider starting with a [gRFC proposal](https://github.com/grpc/proposal).
- Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists.
- Don't fix code style and formatting unless you are already changing that line to address an issue. PRs with irrelevant changes won't be merged. If you do want to fix formatting or style, do that in a separate PR.
- Unless your PR is trivial, you should expect there will be reviewer comments that you'll need to address before merging. We expect you to be reasonably responsive to those comments, otherwise the PR will be closed after 2-3 weeks of inactivity.
- Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use `rebase -i upstream/master` to curate your commit history and/or to bring in latest changes from master (but avoid rebasing in the middle of a code review).
- Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change).
- **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on.
1. What version of Go are you using (`go version`)?
2. What operating system and processor architecture are you using?
3. What did you do?
4. What did you expect to see?
5. What did you see instead?
### Contributing code
Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file.
- Exceptions to the rules can be made if there's a compelling reason for doing so.

208
vendor/google.golang.org/grpc/LICENSE generated vendored
View File

@@ -1,28 +1,186 @@
Copyright 2014, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2015-2017 gRPC authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,22 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the gRPC project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of gRPC, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of gRPC. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of gRPC or any code incorporated within this
implementation of gRPC constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of gRPC
shall terminate as of the date such litigation is filed.

View File

@@ -1,8 +1,8 @@
#gRPC-Go
# gRPC-Go
[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)
The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide.
The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](http://www.grpc.io/docs/quickstart/go.html) guide.
Installation
------------
@@ -16,23 +16,7 @@ $ go get google.golang.org/grpc
Prerequisites
-------------
This requires Go 1.5 or later.
A note on the version used: significant performance improvements in benchmarks
of grpc-go have been seen by upgrading the go version from 1.5 to the latest
1.7.1.
From https://golang.org/doc/install, one way to install the latest version of go is:
```
$ GO_VERSION=1.7.1
$ OS=linux
$ ARCH=amd64
$ curl -O https://storage.googleapis.com/golang/go${GO_VERSION}.${OS}-${ARCH}.tar.gz
$ sudo tar -C /usr/local -xzf go$GO_VERSION.$OS-$ARCH.tar.gz
$ # Put go on the PATH, keep the usual installation dir
$ sudo ln -s /usr/local/go/bin/go /usr/bin/go
$ rm go$GO_VERSION.$OS-$ARCH.tar.gz
```
This requires Go 1.6 or later.
Constraints
-----------
@@ -42,9 +26,13 @@ Documentation
-------------
See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/).
Performance
-----------
See the current benchmarks for some of the languages supported in [this dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696).
Status
------
GA
General Availability [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages).
FAQ
---

View File

@@ -1,33 +1,18 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
* Copyright 2016 gRPC authors.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
* http://www.apache.org/licenses/LICENSE-2.0
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
@ -35,6 +20,7 @@ package grpc
import (
"fmt"
"net"
"sync"
"golang.org/x/net/context"
@ -60,6 +46,10 @@ type BalancerConfig struct {
// use to dial to a remote load balancer server. The Balancer implementations
// can ignore this if it does not need to talk to another party securely.
DialCreds credentials.TransportCredentials
// Dialer is the custom dialer the Balancer implementation can use to dial
// to a remote load balancer server. The Balancer implementations
// can ignore this if it doesn't need to talk to remote balancer.
Dialer func(context.Context, string) (net.Conn, error)
}
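The added Dialer field lets a custom Balancer reuse whatever dialer the client was configured with when it needs to contact a remote load balancer. A minimal sketch of that pattern (the package name, helper name, and timeout are illustrative only, not part of the library):

```go
package balancerutil

import (
	"net"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// dialRemoteBalancer sketches how a custom Balancer might reach its remote
// load-balancer endpoint: prefer the Dialer handed over in BalancerConfig and
// fall back to a plain TCP dial when none was supplied.
func dialRemoteBalancer(ctx context.Context, cfg grpc.BalancerConfig, addr string) (net.Conn, error) {
	if cfg.Dialer != nil {
		return cfg.Dialer(ctx, addr)
	}
	return net.DialTimeout("tcp", addr, 20*time.Second)
}
```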
// BalancerGetOptions configures a Get call.
@ -385,6 +375,9 @@ func (rr *roundRobin) Notify() <-chan []Address {
func (rr *roundRobin) Close() error {
rr.mu.Lock()
defer rr.mu.Unlock()
if rr.done {
return errBalancerClosed
}
rr.done = true
if rr.w != nil {
rr.w.Close()
149
vendor/google.golang.org/grpc/call.go generated vendored
View File
@ -1,33 +1,18 @@
/*
*
* Copyright 2014, Google Inc.
* All rights reserved.
* Copyright 2014 gRPC authors.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
* http://www.apache.org/licenses/LICENSE-2.0
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
@ -36,13 +21,14 @@ package grpc
import (
"bytes"
"io"
"math"
"time"
"golang.org/x/net/context"
"golang.org/x/net/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
)
@ -72,28 +58,30 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran
}
}
for {
if err = recv(p, dopts.codec, stream, dopts.dc, reply, math.MaxInt32, inPayload); err != nil {
if c.maxReceiveMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
}
if err = recv(p, dopts.codec, stream, dopts.dc, reply, *c.maxReceiveMessageSize, inPayload); err != nil {
if err == io.EOF {
break
}
return
}
}
if inPayload != nil && err == io.EOF && stream.StatusCode() == codes.OK {
if inPayload != nil && err == io.EOF && stream.Status().Code() == codes.OK {
// TODO in the current implementation, inTrailer may be handled before inPayload in some cases.
// Fix the order if necessary.
dopts.copts.StatsHandler.HandleRPC(ctx, inPayload)
}
c.trailerMD = stream.Trailer()
if peer, ok := peer.FromContext(stream.Context()); ok {
c.peer = peer
}
return nil
}
// sendRequest writes out various information of an RPC such as Context and Message.
func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
stream, err := t.NewStream(ctx, callHdr)
if err != nil {
return nil, err
}
func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) {
defer func() {
if err != nil {
// If err is connection error, t will be closed, no need to close stream here.
@ -116,7 +104,13 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor,
}
outBuf, err := encode(dopts.codec, args, compressor, cbuf, outPayload)
if err != nil {
return nil, Errorf(codes.Internal, "grpc: %v", err)
return err
}
if c.maxSendMessageSize == nil {
return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
}
if len(outBuf) > *c.maxSendMessageSize {
return Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(outBuf), *c.maxSendMessageSize)
}
err = t.Write(stream, outBuf, opts)
if err == nil && outPayload != nil {
@ -127,10 +121,10 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor,
// does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following
// recvResponse to get the final status.
if err != nil && err != io.EOF {
return nil, err
return err
}
// Sent successfully.
return stream, nil
return nil
}
// Invoke sends the RPC request on the wire and returns after response is received.
@ -145,14 +139,18 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
c := defaultCallInfo
if mc, ok := cc.getMethodConfig(method); ok {
c.failFast = !mc.WaitForReady
if mc.Timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, mc.Timeout)
defer cancel()
}
mc := cc.GetMethodConfig(method)
if mc.WaitForReady != nil {
c.failFast = !*mc.WaitForReady
}
if mc.Timeout != nil && *mc.Timeout >= 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
defer cancel()
}
opts = append(cc.dopts.callOptions, opts...)
for _, o := range opts {
if err := o.before(&c); err != nil {
return toRPCErr(err)
@ -163,6 +161,10 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
o.after(&c)
}
}()
c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
if EnableTracing {
c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
defer c.traceInfo.tr.Finish()
@ -179,26 +181,25 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
}
}()
}
ctx = newContextWithRPCInfo(ctx)
sh := cc.dopts.copts.StatsHandler
if sh != nil {
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
begin := &stats.Begin{
Client: true,
BeginTime: time.Now(),
FailFast: c.failFast,
}
sh.HandleRPC(ctx, begin)
}
defer func() {
if sh != nil {
defer func() {
end := &stats.End{
Client: true,
EndTime: time.Now(),
Error: e,
}
sh.HandleRPC(ctx, end)
}
}()
}()
}
topts := &transport.Options{
Last: true,
Delay: false,
@ -220,6 +221,9 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
if cc.dopts.cp != nil {
callHdr.SendCompress = cc.dopts.cp.Type()
}
if c.creds != nil {
callHdr.Creds = c.creds
}
gopts := BalancerGetOptions{
BlockingWait: !c.failFast,
@ -227,7 +231,7 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
t, put, err = cc.getTransport(ctx, gopts)
if err != nil {
// TODO(zhaoq): Probably revisit the error handling.
if _, ok := err.(*rpcError); ok {
if _, ok := status.FromError(err); ok {
return err
}
if err == errConnClosing || err == errConnUnavailable {
@ -242,19 +246,35 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
if c.traceInfo.tr != nil {
c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
}
stream, err = sendRequest(ctx, cc.dopts, cc.dopts.cp, callHdr, t, args, topts)
stream, err = t.NewStream(ctx, callHdr)
if err != nil {
if put != nil {
if _, ok := err.(transport.ConnectionError); ok {
// If error is connection error, transport was sending data on wire,
// and we are not sure if anything has been sent on wire.
// If error is not connection error, we are sure nothing has been sent.
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
}
put()
}
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
continue
}
return toRPCErr(err)
}
err = sendRequest(ctx, cc.dopts, cc.dopts.cp, &c, callHdr, stream, t, args, topts)
if err != nil {
if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{
bytesSent: stream.BytesSent(),
bytesReceived: stream.BytesReceived(),
})
put()
put = nil
}
// Retry a non-failfast RPC when
// i) there is a connection error; or
// ii) the server started to drain before this RPC was initiated.
if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
if c.failFast {
return toRPCErr(err)
}
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
continue
}
return toRPCErr(err)
@ -262,13 +282,13 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
err = recvResponse(ctx, cc.dopts, t, &c, stream, reply)
if err != nil {
if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{
bytesSent: stream.BytesSent(),
bytesReceived: stream.BytesReceived(),
})
put()
put = nil
}
if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
if c.failFast {
return toRPCErr(err)
}
if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
continue
}
return toRPCErr(err)
@ -278,9 +298,12 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
}
t.CloseStream(stream, nil)
if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{
bytesSent: stream.BytesSent(),
bytesReceived: stream.BytesReceived(),
})
put()
put = nil
}
return Errorf(stream.StatusCode(), "%s", stream.StatusDesc())
return stream.Status().Err()
}
}
View File
@ -1,33 +1,18 @@
/*
*
* Copyright 2014, Google Inc.
* All rights reserved.
* Copyright 2014 gRPC authors.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
* http://www.apache.org/licenses/LICENSE-2.0
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
@ -45,6 +30,7 @@ import (
"golang.org/x/net/trace"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/transport"
)
@ -55,8 +41,7 @@ var (
ErrClientConnClosing = errors.New("grpc: the client connection is closing")
// ErrClientConnTimeout indicates that the ClientConn cannot establish the
// underlying connections within the specified timeout.
// DEPRECATED: Please use context.DeadlineExceeded instead. This error will be
// removed in Q1 2017.
// DEPRECATED: Please use context.DeadlineExceeded instead.
ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
// errNoTransportSecurity indicates that there is no transport security
@ -78,7 +63,8 @@ var (
errConnClosing = errors.New("grpc: the connection is closing")
// errConnUnavailable indicates that the connection is unavailable.
errConnUnavailable = errors.New("grpc: the connection is unavailable")
errNoAddr = errors.New("grpc: there is no address available to dial")
// errBalancerClosed indicates that the balancer is closed.
errBalancerClosed = errors.New("grpc: balancer is closed")
// minimum time to give a connection to complete
minConnectTimeout = 20 * time.Second
)
@ -86,23 +72,57 @@ var (
// dialOptions configure a Dial call. dialOptions are set by the DialOption
// values passed to Dial.
type dialOptions struct {
unaryInt UnaryClientInterceptor
streamInt StreamClientInterceptor
codec Codec
cp Compressor
dc Decompressor
bs backoffStrategy
balancer Balancer
block bool
insecure bool
timeout time.Duration
scChan <-chan ServiceConfig
copts transport.ConnectOptions
unaryInt UnaryClientInterceptor
streamInt StreamClientInterceptor
codec Codec
cp Compressor
dc Decompressor
bs backoffStrategy
balancer Balancer
block bool
insecure bool
timeout time.Duration
scChan <-chan ServiceConfig
copts transport.ConnectOptions
callOptions []CallOption
}
const (
defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
defaultClientMaxSendMessageSize = 1024 * 1024 * 4
)
// DialOption configures how we set up the connection.
type DialOption func(*dialOptions)
// WithInitialWindowSize returns a DialOption which sets the value for initial window size on a stream.
// The lower bound for window size is 64K and any value smaller than that will be ignored.
func WithInitialWindowSize(s int32) DialOption {
return func(o *dialOptions) {
o.copts.InitialWindowSize = s
}
}
// WithInitialConnWindowSize returns a DialOption which sets the value for initial window size on a connection.
// The lower bound for window size is 64K and any value smaller than that will be ignored.
func WithInitialConnWindowSize(s int32) DialOption {
return func(o *dialOptions) {
o.copts.InitialConnWindowSize = s
}
}
// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
func WithMaxMsgSize(s int) DialOption {
return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
}
// WithDefaultCallOptions returns a DialOption which sets the default CallOptions for calls over the connection.
func WithDefaultCallOptions(cos ...CallOption) DialOption {
return func(o *dialOptions) {
o.callOptions = append(o.callOptions, cos...)
}
}
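A rough usage sketch for the options introduced above, assuming a hypothetical `localhost:50051` target and an insecure connection; it shows WithDefaultCallOptions standing in for the deprecated WithMaxMsgSize:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Hypothetical target; WithInsecure keeps the sketch short.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		// The documented replacement for the deprecated WithMaxMsgSize:
		// raise the default per-call receive limit to 16 MiB.
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(16*1024*1024)),
		// Per-stream initial window size; values below 64K are ignored.
		grpc.WithInitialWindowSize(1<<20),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
```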
// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.
func WithCodec(c Codec) DialOption {
return func(o *dialOptions) {
@ -194,7 +214,7 @@ func WithTransportCredentials(creds credentials.TransportCredentials) DialOption
}
// WithPerRPCCredentials returns a DialOption which sets
// credentials which will place auth state on each outbound RPC.
// credentials and places auth state on each outbound RPC.
func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
return func(o *dialOptions) {
o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
@ -231,7 +251,7 @@ func WithStatsHandler(h stats.Handler) DialOption {
}
}
// FailOnNonTempDialError returns a DialOption that specified if gRPC fails on non-temporary dial errors.
// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on non-temporary dial errors.
// If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network
// address and won't try to reconnect.
// The default value of FailOnNonTempDialError is false.
@ -249,6 +269,13 @@ func WithUserAgent(s string) DialOption {
}
}
// WithKeepaliveParams returns a DialOption that specifies keepalive parameters for the client transport.
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
return func(o *dialOptions) {
o.copts.KeepaliveParams = kp
}
}
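Client-side keepalive can then be enabled at dial time. In the sketch below the ClientParameters field names (Time, Timeout, PermitWithoutStream) are assumed from the keepalive package, and the target is again hypothetical:

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	kp := keepalive.ClientParameters{
		Time:                30 * time.Second, // ping the server if the connection is idle this long
		Timeout:             10 * time.Second, // wait this long for a ping ack before closing
		PermitWithoutStream: true,             // send pings even when no RPCs are active
	}
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(kp),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
```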
// WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs.
func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
return func(o *dialOptions) {
@ -263,25 +290,50 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
}
}
// WithAuthority returns a DialOption that specifies the value to be used as
// the :authority pseudo-header. This value only works with WithInsecure and
// has no effect if TransportCredentials are present.
func WithAuthority(a string) DialOption {
return func(o *dialOptions) {
o.copts.Authority = a
}
}
// Dial creates a client connection to the given target.
func Dial(target string, opts ...DialOption) (*ClientConn, error) {
return DialContext(context.Background(), target, opts...)
}
// DialContext creates a client connection to the given target. ctx can be used to
// cancel or expire the pending connecting. Once this function returns, the
// cancel or expire the pending connection. Once this function returns, the
// cancellation and expiration of ctx will be noop. Users should call ClientConn.Close
// to terminate all the pending operations after this function returns.
// This is the EXPERIMENTAL API.
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
cc := &ClientConn{
target: target,
conns: make(map[Address]*addrConn),
}
cc.ctx, cc.cancel = context.WithCancel(context.Background())
for _, opt := range opts {
opt(&cc.dopts)
}
cc.mkp = cc.dopts.copts.KeepaliveParams
if cc.dopts.copts.Dialer == nil {
cc.dopts.copts.Dialer = newProxyDialer(
func(ctx context.Context, addr string) (net.Conn, error) {
return dialContext(ctx, "tcp", addr)
},
)
}
if cc.dopts.copts.UserAgent != "" {
cc.dopts.copts.UserAgent += " " + grpcUA
} else {
cc.dopts.copts.UserAgent = grpcUA
}
if cc.dopts.timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout)
@ -300,15 +352,16 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
}()
scSet := false
if cc.dopts.scChan != nil {
// Wait for the initial service config.
// Try to get an initial service config.
select {
case sc, ok := <-cc.dopts.scChan:
if ok {
cc.sc = sc
scSet = true
}
case <-ctx.Done():
return nil, ctx.Err()
default:
}
}
// Set defaults.
@ -321,54 +374,47 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
creds := cc.dopts.copts.TransportCredentials
if creds != nil && creds.Info().ServerName != "" {
cc.authority = creds.Info().ServerName
} else if cc.dopts.insecure && cc.dopts.copts.Authority != "" {
cc.authority = cc.dopts.copts.Authority
} else {
colonPos := strings.LastIndex(target, ":")
if colonPos == -1 {
colonPos = len(target)
}
cc.authority = target[:colonPos]
cc.authority = target
}
var ok bool
waitC := make(chan error, 1)
go func() {
var addrs []Address
defer close(waitC)
if cc.dopts.balancer == nil && cc.sc.LB != nil {
cc.dopts.balancer = cc.sc.LB
}
if cc.dopts.balancer == nil {
// Connect to target directly if balancer is nil.
addrs = append(addrs, Address{Addr: target})
} else {
if cc.dopts.balancer != nil {
var credsClone credentials.TransportCredentials
if creds != nil {
credsClone = creds.Clone()
}
config := BalancerConfig{
DialCreds: credsClone,
Dialer: cc.dopts.copts.Dialer,
}
if err := cc.dopts.balancer.Start(target, config); err != nil {
waitC <- err
return
}
ch := cc.dopts.balancer.Notify()
if ch == nil {
// There is no name resolver installed.
addrs = append(addrs, Address{Addr: target})
} else {
addrs, ok = <-ch
if !ok || len(addrs) == 0 {
waitC <- errNoAddr
return
if ch != nil {
if cc.dopts.block {
doneChan := make(chan struct{})
go cc.lbWatcher(doneChan)
<-doneChan
} else {
go cc.lbWatcher(nil)
}
}
}
for _, a := range addrs {
if err := cc.resetAddrConn(a, false, nil); err != nil {
waitC <- err
return
}
}
close(waitC)
// No balancer, or no resolver within the balancer. Connect directly.
if err := cc.resetAddrConn(Address{Addr: target}, cc.dopts.block, nil); err != nil {
waitC <- err
return
}
}()
select {
case <-ctx.Done():
@ -378,16 +424,21 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
return nil, err
}
}
// If balancer is nil or balancer.Notify() is nil, ok will be false here.
// The lbWatcher goroutine will not be created.
if ok {
go cc.lbWatcher()
if cc.dopts.scChan != nil && !scSet {
// Blocking wait for the initial service config.
select {
case sc, ok := <-cc.dopts.scChan:
if ok {
cc.sc = sc
}
case <-ctx.Done():
return nil, ctx.Err()
}
}
if cc.dopts.scChan != nil {
go cc.scWatcher()
}
return cc, nil
}
@ -436,9 +487,14 @@ type ClientConn struct {
mu sync.RWMutex
sc ServiceConfig
conns map[Address]*addrConn
// Keepalive parameters can be updated if a GoAway is received.
mkp keepalive.ClientParameters
}
func (cc *ClientConn) lbWatcher() {
// lbWatcher watches the Notify channel of the balancer in cc and manages
// connections accordingly. If doneChan is not nil, it is closed after the
// first successful connection is made.
func (cc *ClientConn) lbWatcher(doneChan chan struct{}) {
for addrs := range cc.dopts.balancer.Notify() {
var (
add []Address // Addresses need to setup connections.
@ -465,7 +521,15 @@ func (cc *ClientConn) lbWatcher() {
}
cc.mu.Unlock()
for _, a := range add {
cc.resetAddrConn(a, true, nil)
if doneChan != nil {
err := cc.resetAddrConn(a, true, nil)
if err == nil {
close(doneChan)
doneChan = nil
}
} else {
cc.resetAddrConn(a, false, nil)
}
}
for _, c := range del {
c.tearDown(errConnDrain)
@ -494,12 +558,15 @@ func (cc *ClientConn) scWatcher() {
// resetAddrConn creates an addrConn for addr and adds it to cc.conns.
// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason.
// If tearDownErr is nil, errConnDrain will be used instead.
func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr error) error {
func (cc *ClientConn) resetAddrConn(addr Address, block bool, tearDownErr error) error {
ac := &addrConn{
cc: cc,
addr: addr,
dopts: cc.dopts,
}
cc.mu.RLock()
ac.dopts.copts.KeepaliveParams = cc.mkp
cc.mu.RUnlock()
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
ac.stateCV = sync.NewCond(&ac.mu)
if EnableTracing {
@ -544,8 +611,7 @@ func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr err
stale.tearDown(tearDownErr)
}
}
// skipWait may overwrite the decision in ac.dopts.block.
if ac.dopts.block && !skipWait {
if block {
if err := ac.resetTransport(false); err != nil {
if err != errConnClosing {
// Tear down ac and delete it from cc.conns.
@ -578,12 +644,23 @@ func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr err
return nil
}
// TODO: Avoid the locking here.
func (cc *ClientConn) getMethodConfig(method string) (m MethodConfig, ok bool) {
// GetMethodConfig gets the method config of the input method.
// If there's an exact match for input method (i.e. /service/method), we return
// the corresponding MethodConfig.
// If there isn't an exact match for the input method, we look for the default config
// under the service (i.e. /service/). If there is a default MethodConfig for
// the service, we return it.
// Otherwise, we return an empty MethodConfig.
func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
// TODO: Avoid the locking here.
cc.mu.RLock()
defer cc.mu.RUnlock()
m, ok = cc.sc.Methods[method]
return
m, ok := cc.sc.Methods[method]
if !ok {
i := strings.LastIndex(method, "/")
m, _ = cc.sc.Methods[method[:i+1]]
}
return m
}
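To make the fallback order concrete, here is a tiny self-contained sketch that mirrors the lookup semantics; string values stand in for MethodConfig, and the method names are made up:

```go
package main

import (
	"fmt"
	"strings"
)

// lookup mirrors the fallback behaviour described for GetMethodConfig: try the
// exact "/service/method" key first, then the per-service default keyed by
// "/service/".
func lookup(methods map[string]string, method string) string {
	if m, ok := methods[method]; ok {
		return m
	}
	i := strings.LastIndex(method, "/")
	return methods[method[:i+1]]
}

func main() {
	methods := map[string]string{
		"/helloworld.Greeter/SayHello": "exact-match config",
		"/helloworld.Greeter/":         "service default config",
	}
	fmt.Println(lookup(methods, "/helloworld.Greeter/SayHello"))   // exact-match config
	fmt.Println(lookup(methods, "/helloworld.Greeter/SayGoodbye")) // service default config
}
```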
func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) {
@ -624,6 +701,7 @@ func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions)
}
if !ok {
if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false})
put()
}
return nil, nil, errConnClosing
@ -631,6 +709,7 @@ func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions)
t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait)
if err != nil {
if put != nil {
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false})
put()
}
return nil, nil, err
@ -682,6 +761,20 @@ type addrConn struct {
tearDownErr error
}
// adjustParams updates parameters used to create transports upon
// receiving a GoAway.
func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
switch r {
case transport.TooManyPings:
v := 2 * ac.dopts.copts.KeepaliveParams.Time
ac.cc.mu.Lock()
if v > ac.cc.mkp.Time {
ac.cc.mkp.Time = v
}
ac.cc.mu.Unlock()
}
}
// printf records an event in ac's event log, unless ac has been closed.
// REQUIRES ac.mu is held.
func (ac *addrConn) printf(format string, a ...interface{}) {
@ -766,6 +859,8 @@ func (ac *addrConn) resetTransport(closeTransport bool) error {
Metadata: ac.addr.Metadata,
}
newTransport, err := transport.NewClientTransport(ctx, sinfo, ac.dopts.copts)
// Don't call cancel in success path due to a race in Go 1.6:
// https://github.com/golang/go/issues/15078.
if err != nil {
cancel()
@ -788,11 +883,14 @@ func (ac *addrConn) resetTransport(closeTransport bool) error {
}
ac.mu.Unlock()
closeTransport = false
timer := time.NewTimer(sleepTime - time.Since(connectTime))
select {
case <-time.After(sleepTime - time.Since(connectTime)):
case <-timer.C:
case <-ac.ctx.Done():
timer.Stop()
return ac.ctx.Err()
}
timer.Stop()
continue
}
ac.mu.Lock()
@ -836,6 +934,7 @@ func (ac *addrConn) transportMonitor() {
}
return
case <-t.GoAway():
ac.adjustParams(t.GetGoAwayReason())
// If GoAway happens without any network I/O error, ac is closed without shutting down the
// underlying transport (the transport will be closed when all the pending RPCs finished or
// failed.).
@ -844,9 +943,9 @@ func (ac *addrConn) transportMonitor() {
// In both cases, a new ac is created.
select {
case <-t.Error():
ac.cc.resetAddrConn(ac.addr, true, errNetworkIO)
ac.cc.resetAddrConn(ac.addr, false, errNetworkIO)
default:
ac.cc.resetAddrConn(ac.addr, true, errConnDrain)
ac.cc.resetAddrConn(ac.addr, false, errConnDrain)
}
return
case <-t.Error():
@ -855,7 +954,8 @@ func (ac *addrConn) transportMonitor() {
t.Close()
return
case <-t.GoAway():
ac.cc.resetAddrConn(ac.addr, true, errNetworkIO)
ac.adjustParams(t.GetGoAwayReason())
ac.cc.resetAddrConn(ac.addr, false, errNetworkIO)
return
default:
}
Some files were not shown because too many files have changed in this diff.