diff --git a/.gitignore b/.gitignore
index f7ff514..ab3d931 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,3 +29,6 @@ tags
*.sw[o-p]
profile.cov
+
+# IDEA project files
+.idea
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
index d5b73f4..7637d1a 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.5
+FROM golang:1.6
RUN apt-get update && apt-get install -yqq aspell aspell-en libaspell-dev tesseract-ocr tesseract-ocr-eng imagemagick optipng exiftool libjpeg-progs webp
ADD docker/meme.traineddata /usr/share/tesseract-ocr/tessdata/meme.traineddata
ADD docker/imagemagick_policy.xml /etc/ImageMagick-6/policy.xml
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index d08750f..bb2bc0e 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -1,14 +1,19 @@
{
"ImportPath": "github.com/Imgur/mandible",
- "GoVersion": "go1.5",
- "Packages": [
- "./..."
- ],
+ "GoVersion": "go1.6",
"Deps": [
+ {
+ "ImportPath": "github.com/PagerDuty/godspeed",
+ "Rev": "ef757b820a7d6760a89641ac29541967eb6d9f05"
+ },
{
"ImportPath": "github.com/bradfitz/http2",
"Rev": "f8202bc903bda493ebba4aa54922d78430c2c42f"
},
+ {
+ "ImportPath": "github.com/ernado/selectel/storage",
+ "Rev": "10f520cf8312d6729981b6cd30f1085d0fce436b"
+ },
{
"ImportPath": "github.com/golang/glog",
"Rev": "44145f04b68cf362d9c4df2182967c2275eaefed"
@@ -41,10 +46,6 @@
"ImportPath": "github.com/vaughan0/go-ini",
"Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
},
- {
- "ImportPath": "golang.org/x/crypto/ssh/terminal",
- "Rev": "3760e016850398b85094c4c99e955b8c3dea5711"
- },
{
"ImportPath": "golang.org/x/net/context",
"Rev": "84afb0af0050ae286aa9ced0c29383c2a866a925"
@@ -53,30 +54,14 @@
"ImportPath": "golang.org/x/oauth2",
"Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9"
},
- {
- "ImportPath": "google.golang.org/api/bigquery/v2",
- "Rev": "0610a35668fd6881bec389e74208f0df92010e96"
- },
- {
- "ImportPath": "google.golang.org/api/container/v1beta1",
- "Rev": "0610a35668fd6881bec389e74208f0df92010e96"
- },
{
"ImportPath": "google.golang.org/api/googleapi",
"Rev": "0610a35668fd6881bec389e74208f0df92010e96"
},
- {
- "ImportPath": "google.golang.org/api/pubsub/v1beta2",
- "Rev": "0610a35668fd6881bec389e74208f0df92010e96"
- },
{
"ImportPath": "google.golang.org/api/storage/v1",
"Rev": "0610a35668fd6881bec389e74208f0df92010e96"
},
- {
- "ImportPath": "google.golang.org/appengine",
- "Rev": "6bde959377a90acb53366051d7d587bfd7171354"
- },
{
"ImportPath": "google.golang.org/cloud",
"Rev": "0b21ed5434dc279f2b8ea3c02dc69135600bbb8b"
diff --git a/imagestore/factory.go b/imagestore/factory.go
index 7e15320..2c3a8b3 100644
--- a/imagestore/factory.go
+++ b/imagestore/factory.go
@@ -8,6 +8,7 @@ import (
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3"
+ selectel "github.com/ernado/selectel/storage"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
gcloud "google.golang.org/cloud"
@@ -40,6 +41,9 @@ func (this *Factory) NewImageStores() ImageStore {
case "memory":
store = NewInMemoryImageStore()
stores = append(stores, store)
+ case "selectel":
+ store = this.NewSelectelStore(configWrapper)
+ stores = append(stores, store)
default:
log.Fatalf("Unsupported store %s", configWrapper["Type"])
}
@@ -121,3 +125,19 @@ func (this *Factory) NewHashGenerator(store ImageStore) *HashGenerator {
 	hashGen.init()
 	return hashGen
 }
+
+// NewSelectelStore builds an ImageStore backed by Selectel cloud storage.
+// Expected conf keys: "user" and "key" (API credentials), "container",
+// "rootPath", and "NamePathRegex"/"NamePathMap" for the name-to-path mapper.
+// A client-construction error is fatal, matching the other store factories.
+func (this *Factory) NewSelectelStore(conf map[string]string) ImageStore {
+	user, key, container, rootPath := conf["user"], conf["key"], conf["container"], conf["rootPath"]
+	client, err := selectel.New(user, key)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	mapper := NewNamePathMapper(conf["NamePathRegex"], conf["NamePathMap"])
+
+	return NewSelectelImageStore(client, mapper, container, rootPath)
+}
diff --git a/imagestore/selectelstore.go b/imagestore/selectelstore.go
new file mode 100644
index 0000000..eb2fb3e
--- /dev/null
+++ b/imagestore/selectelstore.go
@@ -0,0 +1,87 @@
+package imagestore
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path"
+
+	"github.com/ernado/selectel/storage"
+)
+
+// SelectelStore is an ImageStore backed by Selectel cloud storage.
+// Objects are written into a single container under a fixed root path.
+type SelectelStore struct {
+	client         storage.API
+	storeRoot      string
+	container      string
+	namePathMapper *NamePathMapper
+}
+
+// NewSelectelImageStore returns a SelectelStore that writes into the given
+// container under the given root path.
+func NewSelectelImageStore(client storage.API, mapper *NamePathMapper, container, root string) *SelectelStore {
+	return &SelectelStore{
+		client:         client,
+		namePathMapper: mapper,
+		container:      container,
+		storeRoot:      root,
+	}
+}
+
+// Save uploads the file at src and fills in obj.Url with its storage URL.
+// The upload content type is taken from obj.MimeType.
+func (s *SelectelStore) Save(src string, obj *StoreObject) (*StoreObject, error) {
+	f, err := os.Open(src)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	container, name := s.toSelectelPath(s.toPath(obj))
+	if err := s.client.Upload(f, container, name, obj.MimeType); err != nil {
+		return nil, fmt.Errorf("Selectel api returns error: %v", err)
+	}
+
+	obj.Url = s.client.URL(container, name)
+
+	return obj, nil
+}
+
+// Exists reports whether the object is present in storage. A lookup
+// failure other than "not found" is returned to the caller rather than
+// being reported as absence.
+func (s *SelectelStore) Exists(obj *StoreObject) (bool, error) {
+	container, name := s.toSelectelPath(s.toPath(obj))
+	_, err := s.client.ObjectInfo(container, name)
+	if err == nil {
+		return true, nil
+	}
+	if err == storage.ErrorObjectNotFound {
+		return false, nil
+	}
+	return false, err
+}
+
+// Get returns a reader for the stored object's content.
+func (s *SelectelStore) Get(obj *StoreObject) (io.ReadCloser, error) {
+	container, name := s.toSelectelPath(s.toPath(obj))
+	return s.client.C(container).Object(name).GetReader()
+}
+
+// String identifies this store in logs.
+func (s *SelectelStore) String() string {
+	return "SelectelStore"
+}
+
+// toPath maps the object to its full path under the store root.
+func (s *SelectelStore) toPath(obj *StoreObject) string {
+	return s.storeRoot + "/" + s.namePathMapper.mapToPath(obj)
+}
+
+// toSelectelPath splits a full path into (container path, object name):
+// the configured container plus intermediate directories form the container
+// path, and the base name becomes the object name.
+func (s *SelectelStore) toSelectelPath(fullPath string) (string, string) {
+	return path.Join(s.container, path.Dir(fullPath)), path.Base(fullPath)
+}
diff --git a/vendor/github.com/PagerDuty/godspeed/gspdtest/gspdtest.go b/vendor/github.com/PagerDuty/godspeed/gspdtest/gspdtest.go
new file mode 100644
index 0000000..4527db5
--- /dev/null
+++ b/vendor/github.com/PagerDuty/godspeed/gspdtest/gspdtest.go
@@ -0,0 +1,56 @@
+// Copyright 2014-2015 PagerDuty, Inc, et al. All rights reserved.
+// Use of this source code is governed by the BSD 3-Clause
+// license that can be found in the LICENSE file.
+
+// Package gspdtest is a package used by Godspeed for testing. This package
+// isn't really meant to be consumed by anyone.
+package gspdtest
+
+import (
+ "bytes"
+ "fmt"
+ "net"
+)
+
+// Listener is a function which takes a *net.UDPConn and sends any data received
+// on it back over the c channel. This function is meant to be ran within a
+// goroutine. The ctrl channel is used to shut down the goroutine.
+func Listener(l *net.UDPConn, ctrl chan int, c chan []byte) {
+ for {
+ select {
+ case _, ok := <-ctrl:
+ if !ok {
+ close(c)
+ return
+ }
+ default:
+ buffer := make([]byte, 8193)
+
+ _, err := l.Read(buffer)
+
+ if err != nil {
+ continue
+ }
+
+ c <- bytes.Trim(buffer, "\x00")
+ }
+ }
+}
+
+// BuildListener is a function which builds a *net.UDPConn listening on localhost
+// on the port specified. It also returns a control channel and a return channel.
+func BuildListener(port int) (*net.UDPConn, chan int, chan []byte) {
+ addr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("127.0.0.1:%d", port))
+
+ if err != nil {
+ panic(fmt.Sprintf("getting address for test listener failed, bailing out. Here's everything I know: %v", err))
+ }
+
+ l, err := net.ListenUDP("udp", addr)
+
+ if err != nil {
+ panic(fmt.Sprintf("unable to listen for traffic: %v", err))
+ }
+
+ return l, make(chan int), make(chan []byte)
+}
diff --git a/vendor/github.com/ernado/selectel/LICENSE b/vendor/github.com/ernado/selectel/LICENSE
new file mode 100644
index 0000000..d9d0d2e
--- /dev/null
+++ b/vendor/github.com/ernado/selectel/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Aleksandr Razumov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/ernado/selectel/storage/auth.go b/vendor/github.com/ernado/selectel/storage/auth.go
new file mode 100644
index 0000000..4e624d2
--- /dev/null
+++ b/vendor/github.com/ernado/selectel/storage/auth.go
@@ -0,0 +1,83 @@
+package storage
+
+import (
+ "errors"
+ "net/http"
+ "net/url"
+ "strconv"
+ "time"
+)
+
+const (
+ authURL = "https://auth.selcdn.ru/"
+ authUserHeader = "X-Auth-User"
+ authKeyHeader = "X-Auth-Key"
+ authExpireHeader = "X-Expire-Auth-Token"
+ storageURLHeader = "X-Storage-Url"
+ // tokenDurationAdd used to reduce duration of token
+ // to re-auth before token gets expired
+ tokenDurationAdd = 10 * time.Second
+)
+
+var (
+ // ErrorAuth occurs when client is unable to authenticate
+ ErrorAuth = errors.New("Authentication error")
+ // ErrorBadCredentials occurs when incorrect user/key provided
+ ErrorBadCredentials = errors.New("Bad auth credentials provided")
+)
+
+// Token returns current auth token
+func (c *Client) Token() string {
+ return c.token
+}
+
+// Auth performs authentication to selectel and stores token and storage url
+func (c *Client) Auth(user, key string) error {
+ if blank(user) || blank(key) {
+ return ErrorBadCredentials
+ }
+
+ request, _ := http.NewRequest(getMethod, authURL, nil)
+ request.Header.Add(authUserHeader, user)
+ request.Header.Add(authKeyHeader, key)
+
+ res, err := c.do(request)
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+
+ if res.StatusCode != http.StatusNoContent {
+ return ErrorAuth
+ }
+ expire, err := strconv.Atoi(res.Header.Get(authExpireHeader))
+ if err != nil {
+ return err
+ }
+
+ c.tokenExpire = expire
+ c.token = res.Header.Get(authTokenHeader)
+ if blank(c.token) {
+ return ErrorAuth
+ }
+ c.storageURL, err = url.Parse(res.Header.Get(storageURLHeader))
+ if err != nil || blank(c.storageURL.String()) {
+ return ErrorAuth
+ }
+
+ c.user, c.key = user, key
+ now := time.Now()
+ c.expireFrom = &now
+
+ return nil
+}
+
+// Expired returns true if token is expired or does not exist
+func (c *Client) Expired() bool {
+ if c.expireFrom == nil || blank(c.token) {
+ return true
+ }
+ duration := time.Duration(c.tokenExpire) * time.Second
+ expiredFrom := c.expireFrom.Add(duration).Add(tokenDurationAdd)
+ return expiredFrom.Before(time.Now())
+}
diff --git a/vendor/github.com/ernado/selectel/storage/container.go b/vendor/github.com/ernado/selectel/storage/container.go
new file mode 100644
index 0000000..6e6b69e
--- /dev/null
+++ b/vendor/github.com/ernado/selectel/storage/container.go
@@ -0,0 +1,224 @@
+package storage
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "strconv"
+)
+
+const (
+ containerMetaTypeHeader = "X-Container-Meta-Type"
+ containerPublic = "public"
+ containerPrivate = "private"
+ containerBytesUserHeader = "X-Container-Bytes-Used"
+ containerObjectCountHeader = "X-Container-Object-Count"
+)
+
+var (
+ // ErrorConianerNotEmpty occurs when requested container is not empty
+ ErrorConianerNotEmpty = errors.New("Unable to remove container with objects")
+)
+
+// Container is realization of ContainerAPI
+type Container struct {
+ name string
+ api API
+}
+
+// ContainerInfo is information about container
+type ContainerInfo struct {
+ BytesUsed uint64 `json:"bytes"`
+ ObjectCount uint64 `json:"count"`
+ Name string `json:"name"`
+ RecievedBytes uint64 `json:"rx_bytes"`
+ TransferedBytes uint64 `json:"tx_bytes"`
+ Type string `json:"type"`
+}
+
+// ContainerAPI is interface for selectel storage container
+type ContainerAPI interface {
+ Name() string
+ Upload(reader io.Reader, name, contentType string) error
+ UploadFile(filename string) error
+ URL(filename string) string
+ RemoveObject(name string) error
+ // Remove removes current container
+ Remove() error
+ // Create creates current container
+ Create(bool) error
+ // ObjectInfo returns info about object in container
+ ObjectInfo(name string) (ObjectInfo, error)
+ // Object returns object from container
+ Object(name string) ObjectAPI
+ ObjectsInfo() ([]ObjectInfo, error)
+ Objects() ([]ObjectAPI, error)
+ Info() (info ContainerInfo, err error)
+}
+
+// Upload reads all data from reader and uploads to contaier with filename and content type
+// shortcut to API.Upload
+func (c *Container) Upload(reader io.Reader, filename, contentType string) error {
+ return c.api.Upload(reader, c.name, filename, contentType)
+}
+
+// Name returns container name
+func (c *Container) Name() string {
+ return c.name
+}
+
+// Remove removes current container
+func (c *Container) Remove() error {
+ return c.api.RemoveContainer(c.name)
+}
+
+// Create creates current container
+func (c *Container) Create(private bool) error {
+ container, err := c.api.CreateContainer(c.name, private)
+ if err != nil {
+ return err
+ }
+ *c = *container.(*Container)
+ return nil
+}
+
+// URL returns url for object
+func (c *Container) URL(filename string) string {
+ return c.api.URL(c.name, filename)
+}
+
+// UploadFile to current container. Shortcut to API.UploadFile
+func (c *Container) UploadFile(filename string) error {
+ return c.api.UploadFile(filename, c.name)
+}
+
+// DeleteObject is shortcut to API.DeleteObject
+func (c *Container) RemoveObject(filename string) error {
+ return c.api.RemoveObject(c.name, filename)
+}
+
+func (c *Container) ObjectInfo(name string) (ObjectInfo, error) {
+ return c.api.ObjectInfo(c.name, name)
+}
+
+func (c *Container) Object(name string) ObjectAPI {
+ object := new(Object)
+ object.api = c.api
+ object.container = c
+ object.name = name
+ return object
+}
+
+// ObjectsInfo returns information about all objects in container
+func (c *Container) ObjectsInfo() ([]ObjectInfo, error) {
+ return c.api.ObjectsInfo(c.name)
+}
+
+// Objects returns all object from container
+func (c *Container) Objects() ([]ObjectAPI, error) {
+ info, err := c.ObjectsInfo()
+ if err != nil {
+ return nil, err
+ }
+ objects := []ObjectAPI{}
+ for _, object := range info {
+ objects = append(objects, c.Object(object.Name))
+ }
+ return objects, nil
+}
+
+func (c *Container) Info() (info ContainerInfo, err error) {
+ return c.api.ContainerInfo(c.name)
+}
+
+// C is shortcut to Client.Container
+func (c *Client) C(name string) ContainerAPI {
+ container := new(Container)
+ container.name = name
+ container.api = c
+ return container
+}
+
+// Container returns new ContainerAPI client binted to container name
+// Does no checks for container existance
+func (c *Client) Container(name string) ContainerAPI {
+ return c.C(name)
+}
+
+// CreateContainer creates new container and retuns it.
+// If container already exists, function will return existing container
+func (c *Client) CreateContainer(name string, private bool) (ContainerAPI, error) {
+ req, err := c.NewRequest(putMethod, nil, name)
+ if err != nil {
+ return nil, err
+ }
+ req.Header = http.Header{}
+ containerType := containerPublic
+ if private {
+ containerType = containerPrivate
+ }
+ req.Header.Add(containerMetaTypeHeader, containerType)
+ res, err := c.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ if res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusAccepted {
+ return c.Container(name), nil
+ }
+ return nil, ErrorBadResponce
+}
+
+// RemoveContainer removes container with provided name
+// Container should be empty before removing and must exist
+func (c *Client) RemoveContainer(name string) error {
+ req, err := c.NewRequest(deleteMethod, nil, name)
+ if err != nil {
+ return err
+ }
+ res, err := c.Do(req)
+ if err != nil {
+ return err
+ }
+ if res.StatusCode == http.StatusConflict {
+ return ErrorConianerNotEmpty
+ }
+ if res.StatusCode == http.StatusNotFound {
+ return ErrorObjectNotFound
+ }
+ if res.StatusCode == http.StatusNoContent {
+ return nil
+ }
+ return ErrorBadResponce
+}
+
+func (c *Client) ContainerInfo(name string) (info ContainerInfo, err error) {
+ req, err := c.NewRequest(headMethod, nil, name)
+ if err != nil {
+ return
+ }
+ res, err := c.Do(req)
+ if err != nil {
+ return
+ }
+
+ if res.StatusCode == http.StatusNotFound {
+ return info, ErrorObjectNotFound
+ }
+
+ if res.StatusCode != http.StatusNoContent {
+ return info, ErrorBadResponce
+ }
+
+ parse := func(key string) uint64 {
+ v, _ := strconv.ParseUint(res.Header.Get(key), uint64Base, uint64BitSize)
+ return v
+ }
+
+ info.RecievedBytes = parse(recievedBytesHeader)
+ info.TransferedBytes = parse(transferedBytesHeader)
+ info.BytesUsed = parse(containerBytesUserHeader)
+ info.Type = res.Header.Get(containerMetaTypeHeader)
+ info.ObjectCount = parse(containerObjectCountHeader)
+
+ return
+}
diff --git a/vendor/github.com/ernado/selectel/storage/object.go b/vendor/github.com/ernado/selectel/storage/object.go
new file mode 100644
index 0000000..be5abc8
--- /dev/null
+++ b/vendor/github.com/ernado/selectel/storage/object.go
@@ -0,0 +1,113 @@
+package storage
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strconv"
+ "time"
+)
+
+const (
+ etagHeader = "etag"
+ contentLengthHeader = "Content-Length"
+ lastModifiedLayout = time.RFC1123
+ lastModifiedHeader = "last-modified"
+ objectDownloadsHeader = "X-Object-Downloads"
+)
+
+// ObjectInfo represents object info
+type ObjectInfo struct {
+ Size uint64 `json:"bytes"`
+ ContentType string `json:"content_type"`
+ Downloaded uint64 `json:"downloaded"`
+ Hash string `json:"hash"`
+ LastModifiedStr string `json:"last_modified"`
+ LastModified time.Time `json:"-"`
+ Name string `json:"name"`
+}
+
+type Object struct {
+ name string
+ container ContainerAPI
+ api API
+}
+
+type ObjectAPI interface {
+ Info() (ObjectInfo, error)
+ Remove() error
+ Download() ([]byte, error)
+ Upload(reader io.Reader, contentType string) error
+ UploadFile(filename string) error
+ GetReader() (io.ReadCloser, error)
+}
+
+func (c *Client) ObjectInfo(container, filename string) (f ObjectInfo, err error) {
+ request, err := c.NewRequest(headMethod, nil, container, filename)
+ if err != nil {
+ return f, err
+ }
+ res, err := c.do(request)
+ if err != nil {
+ return f, err
+ }
+ if res.StatusCode == http.StatusNotFound {
+ return f, ErrorObjectNotFound
+ }
+ if res.StatusCode != http.StatusOK {
+ return f, ErrorBadResponce
+ }
+ parse := func(key string) uint64 {
+ v, _ := strconv.ParseUint(res.Header.Get(key), uint64Base, uint64BitSize)
+ return v
+ }
+ f.Size = uint64(res.ContentLength)
+ f.Hash = res.Header.Get(etagHeader)
+ f.ContentType = res.Header.Get(contentTypeHeader)
+ f.LastModified, err = time.Parse(lastModifiedLayout, res.Header.Get(lastModifiedHeader))
+ f.Name = filename
+ if err != nil {
+ return
+ }
+ f.Downloaded = parse(objectDownloadsHeader)
+ return
+}
+
+func (o *Object) Info() (info ObjectInfo, err error) {
+ return o.container.ObjectInfo(o.name)
+}
+
+func (o *Object) Upload(reader io.Reader, contentType string) error {
+ return o.container.Upload(reader, o.name, contentType)
+}
+
+func (o *Object) UploadFile(filename string) error {
+ return o.container.UploadFile(filename)
+}
+
+func (o *Object) Download() ([]byte, error) {
+ reader, err := o.GetReader()
+ if err != nil {
+ return nil, err
+ }
+ return ioutil.ReadAll(reader)
+}
+
+func (o *Object) GetReader() (io.ReadCloser, error) {
+ request, _ := http.NewRequest(getMethod, o.container.URL(o.name), nil)
+ res, err := o.api.Do(request)
+ if err != nil {
+ return nil, err
+ }
+ if res.StatusCode == http.StatusNotFound {
+ return nil, ErrorObjectNotFound
+ }
+ if res.StatusCode != http.StatusOK {
+ return nil, ErrorBadResponce
+ }
+ return res.Body, nil
+}
+
+func (o *Object) Remove() error {
+ return o.container.RemoveObject(o.name)
+}
diff --git a/vendor/github.com/ernado/selectel/storage/selctl/main.go b/vendor/github.com/ernado/selectel/storage/selctl/main.go
new file mode 100644
index 0000000..5bf657b
--- /dev/null
+++ b/vendor/github.com/ernado/selectel/storage/selctl/main.go
@@ -0,0 +1,436 @@
+package main
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/rand"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "github.com/cheggaaa/pb"
+ "github.com/ernado/selectel/storage"
+ "github.com/jwaldrip/odin/cli"
+ "github.com/olekukonko/tablewriter"
+ "io"
+ "io/ioutil"
+ "log"
+ "mime"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+const (
+ envKey = storage.EnvKey
+ envUser = storage.EnvUser
+ version = "1.1"
+ cacheFilename = "~selct.cache~" + version
+ envCache = "SELECTEL_CACHE"
+ envContainer = "SELECTEL_CONTAINER"
+)
+
+var (
+ client = cli.New(version, "Selectel storage command line client", connect)
+ user, key string
+ container string
+ api storage.API
+ debug bool
+ cache bool
+ cacheSecure bool
+ errorNotEnough = errors.New("Not enought arguments")
+)
+
+func encryptionKey() []byte {
+ hasher := sha256.New()
+ hasher.Write([]byte("selectel storage command line client"))
+ hasher.Write([]byte(key))
+ hasher.Write([]byte(user))
+ return hasher.Sum(nil)
+}
+
+func encrypt(data []byte) []byte {
+ block, err := aes.NewCipher(encryptionKey())
+ if err != nil {
+ panic(err)
+ }
+
+ ciphertext := make([]byte, aes.BlockSize+len(data))
+ iv := ciphertext[:aes.BlockSize]
+ if _, err := io.ReadFull(rand.Reader, iv); err != nil {
+ panic(err)
+ }
+
+ stream := cipher.NewCFBEncrypter(block, iv)
+ stream.XORKeyStream(ciphertext[aes.BlockSize:], data)
+ return ciphertext
+}
+
+func decrypt(data []byte) ([]byte, error) {
+ block, err := aes.NewCipher(encryptionKey())
+ if err != nil {
+ return nil, err
+ }
+
+ if len(data) < aes.BlockSize {
+ return nil, errors.New("ciphertext too short")
+ }
+ iv := data[:aes.BlockSize]
+ data = data[aes.BlockSize:]
+ stream := cipher.NewCFBDecrypter(block, iv)
+ stream.XORKeyStream(data, data)
+
+ return data, nil
+}
+
+func init() {
+ client.DefineBoolFlagVar(&debug, "debug", false, "debug mode")
+ client.DefineBoolFlagVar(&cache, "cache", false, fmt.Sprintf("cache token in file (%s)", envCache))
+ client.DefineBoolFlagVar(&cacheSecure, "cache.secure", true, "encrypt/decrypt token with user-key pair (true by default)")
+ client.DefineStringFlag("key", "", fmt.Sprintf("selectel storage key (%s)", envKey))
+ client.AliasFlag('k', "key")
+ client.DefineStringFlag("user", "", fmt.Sprintf("selectel storage user (%s)", envUser))
+ client.AliasFlag('u', "user")
+ client.DefineStringFlag("container", "", fmt.Sprintf("default container (%s)", envContainer))
+ client.AliasFlag('c', "container")
+
+ infoCommand := client.DefineSubCommand("info", "print information about storage/container/object", wrap(info))
+ infoCommand.DefineStringFlag("type", "storage", "storage, container or object")
+ infoCommand.AliasFlag('t', "type")
+
+ listCommand := client.DefineSubCommand("list", "list objects in container/storage", wrap(list))
+ listCommand.DefineStringFlag("type", "storage", "storage or container")
+ listCommand.AliasFlag('t', "type")
+
+ client.DefineSubCommand("upload", "upload object to container", wrap(upload))
+ downloadCommand := client.DefineSubCommand("download", "download object from container", wrap(download))
+ downloadCommand.DefineStringFlag("path", "", "destination path")
+ downloadCommand.AliasFlag('p', "path")
+
+ client.DefineSubCommand("create", "create container", wrap(create))
+
+ removeCommand := client.DefineSubCommand("remove", "remove object or container", wrap(remove))
+ removeCommand.DefineStringFlag("type", "object", "container or object")
+ removeCommand.DefineBoolFlag("force", false, "remove container with files")
+ removeCommand.AliasFlag('f', "force")
+ removeCommand.AliasFlag('t', "type")
+}
+
+func readFlag(c cli.Command, name, env string) string {
+ if len(os.Getenv(env)) > 0 {
+ return os.Getenv(env)
+ }
+ return c.Flag(name).String()
+}
+
+func blank(s string) bool {
+ return len(s) == 0
+}
+
+func load() ([]byte, error) {
+ f, err := os.Open(cacheFilename)
+ if err != nil {
+ return nil, err
+ }
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, err
+ }
+ if !cacheSecure {
+ return data, nil
+ }
+ return decrypt(data)
+}
+
+// connect reads credentials and performs auth
+func connect(c cli.Command) {
+ var err error
+
+ key = readFlag(c, "key", envKey)
+ user = readFlag(c, "user", envUser)
+ container = readFlag(c, "container", envContainer)
+
+ if strings.ToLower(os.Getenv(envCache)) == "true" {
+ cache = true
+ }
+
+ if cache {
+ var data []byte
+ data, err = load()
+ if err != nil {
+ log.Println(err)
+ } else {
+ api, err = storage.NewFromCache(data)
+ if err == nil {
+ return
+ } else {
+ log.Println("unable to load from cache:", err)
+ }
+ }
+ } else {
+ os.Remove(cacheFilename)
+ }
+
+ // checking for blank credentials
+ if blank(key) || blank(user) && api != nil {
+ log.Fatal(storage.ErrorBadCredentials)
+ }
+
+ // connencting to api
+ api = storage.NewAsync(user, key)
+ api.Debug(debug)
+ if err = api.Auth(user, key); err != nil {
+ log.Fatal(err)
+ }
+}
+
+func wrap(callback func(cli.Command)) func(cli.Command) {
+ return func(c cli.Command) {
+ connect(c.Parent())
+ defer func() {
+ if !cache {
+ return
+ }
+ data, _ := api.Dump()
+ if cacheSecure {
+ data = encrypt(data)
+ }
+ f, _ := os.Create(cacheFilename)
+ f.Write(data)
+ }()
+ callback(c)
+ }
+}
+
+// info prints information about storage
+func info(c cli.Command) {
+ var (
+ containerName = container
+ objectName string
+ data interface{}
+ err error
+ arglen = len(c.Args())
+ command = c.Flag("type").String()
+ )
+
+ defer func() {
+ if err != nil {
+ log.Fatal(err)
+ }
+ if blank(containerName) || command == "storage" {
+ data = api.Info()
+ } else {
+ containerApi := api.Container(containerName)
+ if blank(objectName) {
+ data, err = containerApi.Info()
+ } else {
+ data, err = containerApi.Object(objectName).Info()
+ }
+ }
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("%+v\n", data)
+ }()
+
+ if arglen > 0 {
+ if command == "container" {
+ containerName = c.Arg(0).String()
+ return
+ }
+ command = "object"
+ if !blank(containerName) && arglen == 1 {
+ objectName = c.Arg(0).String()
+ return
+ }
+ if arglen == 2 {
+ containerName = c.Arg(0).String()
+ objectName = c.Arg(1).String()
+ return
+ }
+ }
+ if command == "container" && !blank(containerName) {
+ return
+ }
+ if command == "storage" {
+ return
+ }
+ err = errorNotEnough
+}
+
+func remove(c cli.Command) {
+ var (
+ arglen = len(c.Args())
+ object string
+ err error
+ message string
+ objects []storage.ObjectAPI
+ )
+ if arglen == 2 {
+ container = c.Arg(0).String()
+ object = c.Arg(1).String()
+ }
+ if arglen == 1 {
+ if c.Flag("type").String() == "container" {
+ container = c.Arg(0).String()
+ } else {
+ object = c.Arg(0).String()
+ }
+ }
+ if blank(container) {
+ log.Fatal(errorNotEnough)
+ }
+ if blank(object) {
+ containerApi := api.Container(container)
+ err = containerApi.Remove()
+
+ // forced removal of container
+ if err == storage.ErrorConianerNotEmpty && c.Flag("force").Get().(bool) {
+ fmt.Println("removing all objects of", container)
+ objects, err = containerApi.Objects()
+ if err != nil {
+ log.Fatal(err)
+ }
+ for _, object := range objects {
+ err = object.Remove()
+ // skipping NotFound errors as non-critical
+ if err != nil && err != storage.ErrorObjectNotFound {
+ log.Fatal(err)
+ }
+ }
+ err = containerApi.Remove()
+ }
+ message = fmt.Sprintf("container %s removed", container)
+ } else {
+ err = api.Container(container).Object(object).Remove()
+ message = fmt.Sprintf("object %s removed in container %s", object, container)
+ }
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println(message)
+}
+
+func create(c cli.Command) {
+ if len(c.Args()) == 0 {
+ log.Fatal(errorNotEnough)
+ }
+ var name = c.Arg(0).String()
+ if _, err := api.CreateContainer(name, false); err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("created container %s\n", name)
+}
+
+func upload(c cli.Command) {
+ var path string
+ switch len(c.Args()) {
+ case 1:
+ path = c.Arg(0).String()
+ case 2:
+ container = c.Arg(0).String()
+ path = c.Arg(1).String()
+ }
+ if blank(container) || blank(path) {
+ log.Fatal(errorNotEnough)
+ }
+
+ f, err := os.Open(path)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ stat, err := os.Stat(path)
+ if err != nil {
+ log.Fatal(err)
+ }
+ ext := filepath.Ext(path)
+ mimetype := mime.TypeByExtension(ext)
+ bar := pb.New64(stat.Size()).SetUnits(pb.U_BYTES)
+ bar.Start()
+ reader := io.TeeReader(f, bar)
+ if err := api.Container(container).Upload(reader, stat.Name(), mimetype); err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("uploaded to %s\n", container)
+}
+
+func list(c cli.Command) {
+ var (
+ arglen = len(c.Args())
+ table = tablewriter.NewWriter(os.Stdout)
+ )
+ if arglen == 0 && (blank(container) || c.Flag("type").String() == "storage") {
+ containers, err := api.ContainersInfo()
+ if err != nil {
+ log.Fatal(err)
+ }
+ table.SetHeader([]string{"Name", "Objects", "Type"})
+ for _, cont := range containers {
+ v := []string{cont.Name, fmt.Sprint(cont.ObjectCount), cont.Type}
+ table.Append(v)
+ }
+ table.Render()
+ return
+ }
+ if arglen == 1 {
+ container = c.Arg(0).String()
+ }
+ if blank(container) {
+ log.Fatal(errorNotEnough)
+ }
+ objects, err := api.Container(container).ObjectsInfo()
+ if err != nil {
+ log.Fatal(err)
+ }
+ table.SetHeader([]string{"Name", "Size", "Downloaded"})
+ for _, object := range objects {
+ v := []string{object.Name, fmt.Sprint(object.Size), fmt.Sprint(object.Downloaded)}
+ table.Append(v)
+ }
+ table.Render()
+}
+
+func download(c cli.Command) {
+ var (
+ arglen = len(c.Args())
+ objectName string
+ path = c.Flag("path").String()
+ )
+ switch arglen {
+ case 1:
+ objectName = c.Arg(0).String()
+ case 2:
+ objectName = c.Arg(1).String()
+ container = c.Arg(0).String()
+ }
+ if blank(container) || blank(objectName) {
+ log.Fatal(errorNotEnough)
+ }
+ if blank(path) {
+ path = objectName
+ }
+ reader, err := api.Container(container).Object(objectName).GetReader()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer reader.Close()
+ fmt.Printf("downloading %s->%s from %s\n", objectName, path, container)
+ f, err := os.Create(path)
+ if err != nil {
+ log.Fatal(err)
+ }
+ n, err := io.Copy(f, reader)
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("downloaded %s, %d bytes\n", objectName, n)
+}
+
+func main() {
+ defer func() {
+ if r := recover(); r != nil {
+ fmt.Println("Recovered", r)
+ }
+ }()
+ client.Start()
+}
diff --git a/vendor/github.com/ernado/selectel/storage/storage.go b/vendor/github.com/ernado/selectel/storage/storage.go
new file mode 100644
index 0000000..06f9ed4
--- /dev/null
+++ b/vendor/github.com/ernado/selectel/storage/storage.go
@@ -0,0 +1,393 @@
+package storage
+
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+ fileLastModifiedLayout = "2006-01-02T15:04:05.999999"
+ queryFormat = "format"
+ queryJSON = "json"
+ headMethod = "HEAD"
+ getMethod = "GET"
+ postMethod = "POST"
+ putMethod = "PUT"
+ deleteMethod = "DELETE"
+ authTokenHeader = "X-Auth-Token"
+ objectCountHeader = "X-Account-Object-Count"
+ bytesUsedHeader = "X-Account-Bytes-Used"
+ containerCountHeader = "X-Account-Container-Count"
+ recievedBytesHeader = "X-Received-Bytes"
+ transferedBytesHeader = "X-Transfered-Bytes"
+ uint64BitSize = 64
+ uint64Base = 10
+ // EnvUser is environmental variable for selectel api username
+ EnvUser = "SELECTEL_USER"
+ // EnvKey is environmental variable for selectel api key
+ EnvKey = "SELECTEL_KEY"
+)
+
+var (
+ // ErrorObjectNotFound occurs when server returns 404
+ ErrorObjectNotFound = errors.New("Object not found")
+ // ErrorBadResponce occurs when server returns unexpected code
+ ErrorBadResponce = errors.New("Unable to process api responce")
+	// ErrorBadName occurs when a malformed container/object name is provided
+ ErrorBadName = errors.New("Bad container/object name provided")
+	// ErrorBadJSON occurs on unmarshalling error
+ ErrorBadJSON = errors.New("Unable to parse api responce")
+)
+
+// Client is selectel storage api client
+type Client struct {
+ storageURL *url.URL
+ token string
+ tokenExpire int
+ expireFrom *time.Time
+ user string
+ key string
+ client DoClient
+ file fileMock
+ debug bool
+}
+
+type ClientCredentials struct {
+ Token string
+ Debug bool
+ Expire int
+ ExpireFrom *time.Time
+ URL string
+}
+
+func NewFromCache(data []byte) (API, error) {
+ var (
+ cache = new(ClientCredentials)
+ err error
+ )
+ decorer := gob.NewDecoder(bytes.NewBuffer(data))
+ if err = decorer.Decode(cache); err != nil {
+ return nil, err
+ }
+ c := newClient(new(http.Client))
+ c.token = cache.Token
+ c.tokenExpire = cache.Expire
+ c.debug = cache.Debug
+ c.expireFrom = cache.ExpireFrom
+ c.storageURL, err = url.Parse(cache.URL)
+ if err != nil {
+ return nil, ErrorBadCredentials
+ }
+ return c, nil
+}
+
+func (c *Client) Credentials() (cache ClientCredentials) {
+ cache.URL = c.storageURL.String()
+ cache.Expire = c.tokenExpire
+ cache.ExpireFrom = c.expireFrom
+ cache.Token = c.token
+ cache.Debug = c.debug
+
+ return cache
+}
+
+func (c *Client) Dump() ([]byte, error) {
+ buffer := new(bytes.Buffer)
+ encoder := gob.NewEncoder(buffer)
+ if err := encoder.Encode(c.Credentials()); err != nil {
+ return nil, err
+ }
+ return buffer.Bytes(), nil
+}
+
+// StorageInformation contains some useful metrics about storage for current user
+type StorageInformation struct {
+ ObjectCount uint64
+ BytesUsed uint64
+ ContainerCount uint64
+ RecievedBytes uint64
+ TransferedBytes uint64
+}
+
+// API for selectel storage
+type API interface {
+ DoClient
+ Info() StorageInformation
+ Upload(reader io.Reader, container, filename, t string) error
+ UploadFile(filename, container string) error
+ Auth(user, key string) error
+ Debug(debug bool)
+ Token() string
+ C(string) ContainerAPI
+ Container(string) ContainerAPI
+ RemoveObject(container, filename string) error
+ URL(container, filename string) string
+ CreateContainer(name string, private bool) (ContainerAPI, error)
+ RemoveContainer(name string) error
+ // ObjectInfo returns information about object in container
+ ObjectInfo(container, filename string) (f ObjectInfo, err error)
+ ObjectsInfo(container string) ([]ObjectInfo, error)
+ ContainerInfo(name string) (info ContainerInfo, err error)
+ ContainersInfo() ([]ContainerInfo, error)
+ Containers() ([]ContainerAPI, error)
+ Credentials() (cache ClientCredentials)
+ Dump() ([]byte, error)
+}
+
+// DoClient is mock of http.Client
+type DoClient interface {
+ Do(request *http.Request) (*http.Response, error)
+}
+
+// setClient sets client
+func (c *Client) setClient(client DoClient) {
+ c.client = client
+}
+
+func (c *Client) Debug(debug bool) {
+ c.debug = debug
+}
+
+// ContainersInfo returns all container-specific information from storage
+func (c *Client) ContainersInfo() ([]ContainerInfo, error) {
+ info := []ContainerInfo{}
+ request, err := c.NewRequest(getMethod, nil)
+ if err != nil {
+ return nil, err
+ }
+ query := request.URL.Query()
+ query.Add(queryFormat, queryJSON)
+ request.URL.RawQuery = query.Encode()
+ res, err := c.Do(request)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ return nil, ErrorBadResponce
+ }
+ decoder := json.NewDecoder(res.Body)
+ if err := decoder.Decode(&info); err != nil {
+ return nil, ErrorBadJSON
+ }
+ return info, nil
+}
+
+// Containers returns all containers from storage
+func (c *Client) Containers() ([]ContainerAPI, error) {
+ info, err := c.ContainersInfo()
+ if err != nil {
+ return nil, err
+ }
+ containers := []ContainerAPI{}
+ for _, container := range info {
+ containers = append(containers, c.Container(container.Name))
+ }
+ return containers, nil
+}
+
+// ObjectsInfo returns information about all objects in container
+func (c *Client) ObjectsInfo(container string) ([]ObjectInfo, error) {
+ info := []ObjectInfo{}
+ request, err := c.NewRequest(getMethod, nil, container)
+ if err != nil {
+ return nil, err
+ }
+ query := request.URL.Query()
+ query.Add(queryFormat, queryJSON)
+ request.URL.RawQuery = query.Encode()
+ res, err := c.Do(request)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode == http.StatusNotFound {
+ return nil, ErrorObjectNotFound
+ }
+ if res.StatusCode != http.StatusOK {
+ return nil, ErrorBadResponce
+ }
+ decoder := json.NewDecoder(res.Body)
+ if err := decoder.Decode(&info); err != nil {
+ return nil, ErrorBadJSON
+ }
+ for i, v := range info {
+ info[i].LastModified, err = time.Parse(fileLastModifiedLayout, v.LastModifiedStr)
+ if err != nil {
+ return info, err
+ }
+ }
+ return info, nil
+}
+
+// RemoveObject removes object from specified container
+func (c *Client) RemoveObject(container, filename string) error {
+ request, err := c.NewRequest(deleteMethod, nil, container, filename)
+ if err != nil {
+ return err
+ }
+ res, err := c.Do(request)
+ if err != nil {
+ return err
+ }
+ if res.StatusCode == http.StatusNotFound {
+ return ErrorObjectNotFound
+ }
+ if res.StatusCode == http.StatusNoContent {
+ return nil
+ }
+ return ErrorBadResponce
+}
+
+// Info returns StorageInformation for current user
+func (c *Client) Info() (info StorageInformation) {
+ request, err := c.NewRequest(getMethod, nil)
+ if err != nil {
+ return
+ }
+ res, err := c.do(request)
+ if err != nil {
+ return
+ }
+ parse := func(key string) uint64 {
+ v, _ := strconv.ParseUint(res.Header.Get(key), uint64Base, uint64BitSize)
+ return v
+ }
+ info.BytesUsed = parse(bytesUsedHeader)
+ info.ObjectCount = parse(objectCountHeader)
+ info.ContainerCount = parse(containerCountHeader)
+ info.RecievedBytes = parse(recievedBytesHeader)
+ info.TransferedBytes = parse(transferedBytesHeader)
+ return
+}
+
+// URL returns url for file in container
+func (c *Client) URL(container, filename string) string {
+ return c.url(container, filename)
+}
+
+// Do performs request with auth token
+func (c *Client) Do(request *http.Request) (res *http.Response, err error) {
+ return c.do(request)
+}
+
+func (c *Client) do(request *http.Request) (res *http.Response, err error) {
+	// prevent nil pointer dereference
+ if request.Header == nil {
+ request.Header = http.Header{}
+ }
+ // check for token expiration / first request with async auth
+ if request.URL.String() != authURL && c.Expired() {
+ log.Println("[selectel]", "token expired, performing auth")
+ if err = c.Auth(c.user, c.key); err != nil {
+ return
+ }
+ // fix hostname of request
+ c.fixURL(request)
+ }
+ // add auth token to headers
+ if !blank(c.token) {
+ request.Header.Add(authTokenHeader, c.token)
+ }
+ if c.debug {
+ // perform request and record time elapsed
+ start := time.Now().Truncate(time.Millisecond)
+ res, err = c.client.Do(request)
+ stop := time.Now().Truncate(time.Millisecond)
+ duration := stop.Sub(start)
+ // log error
+ if err != nil {
+ log.Println(request.Method, request.URL.String(), err, duration)
+ return
+ }
+ // log request
+ log.Println(request.Method, request.URL.String(), res.StatusCode, duration)
+ // check for auth code
+ } else {
+ res, err = c.client.Do(request)
+ if err != nil {
+ return
+ }
+ }
+ if res.StatusCode == http.StatusUnauthorized {
+ c.expireFrom = nil // ensure that next request will force authentication
+ return nil, ErrorAuth
+ }
+ return
+}
+
+func (c *Client) NewRequest(method string, body io.Reader, parms ...string) (*http.Request, error) {
+ var badName bool
+ for i := range parms {
+ // check for length
+ if len(parms[i]) > 256 {
+ badName = true
+ }
+	// todo: check for trailing slash
+ parms[i] = url.QueryEscape(parms[i])
+ }
+ req, err := http.NewRequest(method, c.url(parms...), body)
+ if err != nil || badName {
+ return nil, ErrorBadName
+ }
+ return req, nil
+}
+
+func (c *Client) fixURL(request *http.Request) error {
+ newRequest, err := http.NewRequest(request.Method, c.url(request.URL.Path), request.Body)
+ *request = *newRequest
+ return err
+}
+
+func (c *Client) url(postfix ...string) string {
+ path := strings.Join(postfix, "/")
+ if c.storageURL == nil {
+ return path
+ }
+ return fmt.Sprintf("%s%s", c.storageURL, path)
+}
+
+// New returns new selectel storage api client
+func New(user, key string) (API, error) {
+ client := newClient(new(http.Client))
+ return client, client.Auth(user, key)
+}
+
+// NewAsync returns new api client and lazily performs auth
+func NewAsync(user, key string) API {
+ c := newClient(new(http.Client))
+ if blank(user) || blank(key) {
+ panic(ErrorBadCredentials)
+ }
+ c.user = user
+ c.key = key
+ return c
+}
+
+func newClient(client *http.Client) *Client {
+ c := new(Client)
+ c.client = client
+ return c
+}
+
+// NewEnv acts as New, but reads credentials from environment
+func NewEnv() (API, error) {
+ user := os.Getenv(EnvUser)
+ key := os.Getenv(EnvKey)
+ return New(user, key)
+}
+
+func blank(s string) bool {
+ return len(s) == 0
+}
diff --git a/vendor/github.com/ernado/selectel/storage/upload.go b/vendor/github.com/ernado/selectel/storage/upload.go
new file mode 100644
index 0000000..21b33ab
--- /dev/null
+++ b/vendor/github.com/ernado/selectel/storage/upload.go
@@ -0,0 +1,129 @@
+package storage
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+ "io"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "os"
+ "path/filepath"
+)
+
+const (
+ contentTypeHeader = "Content-Type"
+)
+
+// fileMock is mock for file operations
+type fileMock interface {
+ Open(name string) (*os.File, error)
+ Stat(name string) (os.FileInfo, error)
+}
+
+// fileErrorMock is simple mock that returns specified errors on
+// function call.
+type fileErrorMock struct {
+ errOpen error
+ errStat error
+}
+
+func (f fileErrorMock) Open(name string) (*os.File, error) {
+ return nil, f.errOpen
+}
+
+func (f fileErrorMock) Stat(name string) (os.FileInfo, error) {
+ return nil, f.errStat
+}
+
+func (c *Client) fileOpen(name string) (*os.File, error) {
+ if c.file != nil {
+ return c.file.Open(name)
+ }
+ return os.Open(name)
+}
+
+func (c *Client) fileSetMockError(errOpen, errStat error) {
+ c.file = &fileErrorMock{errOpen, errStat}
+}
+
+func (c *Client) fileStat(name string) (os.FileInfo, error) {
+ if c.file != nil {
+ return c.file.Stat(name)
+ }
+ return os.Stat(name)
+}
+
+// UploadFile to container
+func (c *Client) UploadFile(filename, container string) error {
+ f, err := c.fileOpen(filename)
+ if err != nil {
+ return err
+ }
+ stats, err := c.fileStat(filename)
+ if err != nil {
+ return err
+ }
+ ext := filepath.Ext(filename)
+ mimetype := mime.TypeByExtension(ext)
+ return c.Upload(f, container, stats.Name(), mimetype)
+}
+
+func (c *Client) upload(reader io.Reader, container, filename, contentType string, check bool) error {
+ var etag string
+ closer, ok := reader.(io.ReadCloser)
+ if ok {
+ defer closer.Close()
+ }
+
+ if check {
+ f, err := ioutil.TempFile(os.TempDir(), filename)
+ if err != nil {
+ return err
+ }
+ stat, _ := f.Stat()
+ path := stat.Name()
+ hasher := md5.New()
+ writer := io.MultiWriter(f, hasher)
+ _, err = io.Copy(writer, reader)
+ f.Close()
+ if err != nil {
+ return err
+ }
+ etag = hex.EncodeToString(hasher.Sum(nil))
+ reader, err = os.Open(filepath.Join(os.TempDir(), path))
+ defer os.Remove(path)
+ if err != nil {
+ return err
+ }
+ }
+
+ request, err := c.NewRequest(putMethod, reader, container, filename)
+ if err != nil {
+ return err
+ }
+ if !blank(contentType) {
+ request.Header.Add(contentTypeHeader, contentType)
+ }
+
+ if !blank(etag) {
+ request.Header.Add(etagHeader, etag)
+ }
+
+ res, err := c.do(request)
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+
+ if res.StatusCode != http.StatusCreated {
+ return ErrorBadResponce
+ }
+
+ return nil
+}
+
+// Upload reads all data from reader and uploads to container with filename and content type
+func (c *Client) Upload(reader io.Reader, container, filename, contentType string) error {
+ return c.upload(reader, container, filename, contentType, true)
+}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
deleted file mode 100644
index 741eeb1..0000000
--- a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
+++ /dev/null
@@ -1,892 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package terminal
-
-import (
- "bytes"
- "io"
- "sync"
- "unicode/utf8"
-)
-
-// EscapeCodes contains escape sequences that can be written to the terminal in
-// order to achieve different styles of text.
-type EscapeCodes struct {
- // Foreground colors
- Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte
-
- // Reset all attributes
- Reset []byte
-}
-
-var vt100EscapeCodes = EscapeCodes{
- Black: []byte{keyEscape, '[', '3', '0', 'm'},
- Red: []byte{keyEscape, '[', '3', '1', 'm'},
- Green: []byte{keyEscape, '[', '3', '2', 'm'},
- Yellow: []byte{keyEscape, '[', '3', '3', 'm'},
- Blue: []byte{keyEscape, '[', '3', '4', 'm'},
- Magenta: []byte{keyEscape, '[', '3', '5', 'm'},
- Cyan: []byte{keyEscape, '[', '3', '6', 'm'},
- White: []byte{keyEscape, '[', '3', '7', 'm'},
-
- Reset: []byte{keyEscape, '[', '0', 'm'},
-}
-
-// Terminal contains the state for running a VT100 terminal that is capable of
-// reading lines of input.
-type Terminal struct {
- // AutoCompleteCallback, if non-null, is called for each keypress with
- // the full input line and the current position of the cursor (in
- // bytes, as an index into |line|). If it returns ok=false, the key
- // press is processed normally. Otherwise it returns a replacement line
- // and the new cursor position.
- AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool)
-
- // Escape contains a pointer to the escape codes for this terminal.
- // It's always a valid pointer, although the escape codes themselves
- // may be empty if the terminal doesn't support them.
- Escape *EscapeCodes
-
- // lock protects the terminal and the state in this object from
- // concurrent processing of a key press and a Write() call.
- lock sync.Mutex
-
- c io.ReadWriter
- prompt []rune
-
- // line is the current line being entered.
- line []rune
- // pos is the logical position of the cursor in line
- pos int
- // echo is true if local echo is enabled
- echo bool
- // pasteActive is true iff there is a bracketed paste operation in
- // progress.
- pasteActive bool
-
- // cursorX contains the current X value of the cursor where the left
- // edge is 0. cursorY contains the row number where the first row of
- // the current line is 0.
- cursorX, cursorY int
- // maxLine is the greatest value of cursorY so far.
- maxLine int
-
- termWidth, termHeight int
-
- // outBuf contains the terminal data to be sent.
- outBuf []byte
- // remainder contains the remainder of any partial key sequences after
- // a read. It aliases into inBuf.
- remainder []byte
- inBuf [256]byte
-
- // history contains previously entered commands so that they can be
- // accessed with the up and down keys.
- history stRingBuffer
- // historyIndex stores the currently accessed history entry, where zero
- // means the immediately previous entry.
- historyIndex int
- // When navigating up and down the history it's possible to return to
- // the incomplete, initial line. That value is stored in
- // historyPending.
- historyPending string
-}
-
-// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is
-// a local terminal, that terminal must first have been put into raw mode.
-// prompt is a string that is written at the start of each input line (i.e.
-// "> ").
-func NewTerminal(c io.ReadWriter, prompt string) *Terminal {
- return &Terminal{
- Escape: &vt100EscapeCodes,
- c: c,
- prompt: []rune(prompt),
- termWidth: 80,
- termHeight: 24,
- echo: true,
- historyIndex: -1,
- }
-}
-
-const (
- keyCtrlD = 4
- keyCtrlU = 21
- keyEnter = '\r'
- keyEscape = 27
- keyBackspace = 127
- keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota
- keyUp
- keyDown
- keyLeft
- keyRight
- keyAltLeft
- keyAltRight
- keyHome
- keyEnd
- keyDeleteWord
- keyDeleteLine
- keyClearScreen
- keyPasteStart
- keyPasteEnd
-)
-
-var pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'}
-var pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'}
-
-// bytesToKey tries to parse a key sequence from b. If successful, it returns
-// the key and the remainder of the input. Otherwise it returns utf8.RuneError.
-func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
- if len(b) == 0 {
- return utf8.RuneError, nil
- }
-
- if !pasteActive {
- switch b[0] {
- case 1: // ^A
- return keyHome, b[1:]
- case 5: // ^E
- return keyEnd, b[1:]
- case 8: // ^H
- return keyBackspace, b[1:]
- case 11: // ^K
- return keyDeleteLine, b[1:]
- case 12: // ^L
- return keyClearScreen, b[1:]
- case 23: // ^W
- return keyDeleteWord, b[1:]
- }
- }
-
- if b[0] != keyEscape {
- if !utf8.FullRune(b) {
- return utf8.RuneError, b
- }
- r, l := utf8.DecodeRune(b)
- return r, b[l:]
- }
-
- if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' {
- switch b[2] {
- case 'A':
- return keyUp, b[3:]
- case 'B':
- return keyDown, b[3:]
- case 'C':
- return keyRight, b[3:]
- case 'D':
- return keyLeft, b[3:]
- case 'H':
- return keyHome, b[3:]
- case 'F':
- return keyEnd, b[3:]
- }
- }
-
- if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' {
- switch b[5] {
- case 'C':
- return keyAltRight, b[6:]
- case 'D':
- return keyAltLeft, b[6:]
- }
- }
-
- if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) {
- return keyPasteStart, b[6:]
- }
-
- if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) {
- return keyPasteEnd, b[6:]
- }
-
- // If we get here then we have a key that we don't recognise, or a
- // partial sequence. It's not clear how one should find the end of a
- // sequence without knowing them all, but it seems that [a-zA-Z~] only
- // appears at the end of a sequence.
- for i, c := range b[0:] {
- if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' {
- return keyUnknown, b[i+1:]
- }
- }
-
- return utf8.RuneError, b
-}
-
-// queue appends data to the end of t.outBuf
-func (t *Terminal) queue(data []rune) {
- t.outBuf = append(t.outBuf, []byte(string(data))...)
-}
-
-var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'}
-var space = []rune{' '}
-
-func isPrintable(key rune) bool {
- isInSurrogateArea := key >= 0xd800 && key <= 0xdbff
- return key >= 32 && !isInSurrogateArea
-}
-
-// moveCursorToPos appends data to t.outBuf which will move the cursor to the
-// given, logical position in the text.
-func (t *Terminal) moveCursorToPos(pos int) {
- if !t.echo {
- return
- }
-
- x := visualLength(t.prompt) + pos
- y := x / t.termWidth
- x = x % t.termWidth
-
- up := 0
- if y < t.cursorY {
- up = t.cursorY - y
- }
-
- down := 0
- if y > t.cursorY {
- down = y - t.cursorY
- }
-
- left := 0
- if x < t.cursorX {
- left = t.cursorX - x
- }
-
- right := 0
- if x > t.cursorX {
- right = x - t.cursorX
- }
-
- t.cursorX = x
- t.cursorY = y
- t.move(up, down, left, right)
-}
-
-func (t *Terminal) move(up, down, left, right int) {
- movement := make([]rune, 3*(up+down+left+right))
- m := movement
- for i := 0; i < up; i++ {
- m[0] = keyEscape
- m[1] = '['
- m[2] = 'A'
- m = m[3:]
- }
- for i := 0; i < down; i++ {
- m[0] = keyEscape
- m[1] = '['
- m[2] = 'B'
- m = m[3:]
- }
- for i := 0; i < left; i++ {
- m[0] = keyEscape
- m[1] = '['
- m[2] = 'D'
- m = m[3:]
- }
- for i := 0; i < right; i++ {
- m[0] = keyEscape
- m[1] = '['
- m[2] = 'C'
- m = m[3:]
- }
-
- t.queue(movement)
-}
-
-func (t *Terminal) clearLineToRight() {
- op := []rune{keyEscape, '[', 'K'}
- t.queue(op)
-}
-
-const maxLineLength = 4096
-
-func (t *Terminal) setLine(newLine []rune, newPos int) {
- if t.echo {
- t.moveCursorToPos(0)
- t.writeLine(newLine)
- for i := len(newLine); i < len(t.line); i++ {
- t.writeLine(space)
- }
- t.moveCursorToPos(newPos)
- }
- t.line = newLine
- t.pos = newPos
-}
-
-func (t *Terminal) advanceCursor(places int) {
- t.cursorX += places
- t.cursorY += t.cursorX / t.termWidth
- if t.cursorY > t.maxLine {
- t.maxLine = t.cursorY
- }
- t.cursorX = t.cursorX % t.termWidth
-
- if places > 0 && t.cursorX == 0 {
- // Normally terminals will advance the current position
- // when writing a character. But that doesn't happen
- // for the last character in a line. However, when
- // writing a character (except a new line) that causes
- // a line wrap, the position will be advanced two
- // places.
- //
- // So, if we are stopping at the end of a line, we
- // need to write a newline so that our cursor can be
- // advanced to the next line.
- t.outBuf = append(t.outBuf, '\n')
- }
-}
-
-func (t *Terminal) eraseNPreviousChars(n int) {
- if n == 0 {
- return
- }
-
- if t.pos < n {
- n = t.pos
- }
- t.pos -= n
- t.moveCursorToPos(t.pos)
-
- copy(t.line[t.pos:], t.line[n+t.pos:])
- t.line = t.line[:len(t.line)-n]
- if t.echo {
- t.writeLine(t.line[t.pos:])
- for i := 0; i < n; i++ {
- t.queue(space)
- }
- t.advanceCursor(n)
- t.moveCursorToPos(t.pos)
- }
-}
-
-// countToLeftWord returns then number of characters from the cursor to the
-// start of the previous word.
-func (t *Terminal) countToLeftWord() int {
- if t.pos == 0 {
- return 0
- }
-
- pos := t.pos - 1
- for pos > 0 {
- if t.line[pos] != ' ' {
- break
- }
- pos--
- }
- for pos > 0 {
- if t.line[pos] == ' ' {
- pos++
- break
- }
- pos--
- }
-
- return t.pos - pos
-}
-
-// countToRightWord returns then number of characters from the cursor to the
-// start of the next word.
-func (t *Terminal) countToRightWord() int {
- pos := t.pos
- for pos < len(t.line) {
- if t.line[pos] == ' ' {
- break
- }
- pos++
- }
- for pos < len(t.line) {
- if t.line[pos] != ' ' {
- break
- }
- pos++
- }
- return pos - t.pos
-}
-
-// visualLength returns the number of visible glyphs in s.
-func visualLength(runes []rune) int {
- inEscapeSeq := false
- length := 0
-
- for _, r := range runes {
- switch {
- case inEscapeSeq:
- if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') {
- inEscapeSeq = false
- }
- case r == '\x1b':
- inEscapeSeq = true
- default:
- length++
- }
- }
-
- return length
-}
-
-// handleKey processes the given key and, optionally, returns a line of text
-// that the user has entered.
-func (t *Terminal) handleKey(key rune) (line string, ok bool) {
- if t.pasteActive && key != keyEnter {
- t.addKeyToLine(key)
- return
- }
-
- switch key {
- case keyBackspace:
- if t.pos == 0 {
- return
- }
- t.eraseNPreviousChars(1)
- case keyAltLeft:
- // move left by a word.
- t.pos -= t.countToLeftWord()
- t.moveCursorToPos(t.pos)
- case keyAltRight:
- // move right by a word.
- t.pos += t.countToRightWord()
- t.moveCursorToPos(t.pos)
- case keyLeft:
- if t.pos == 0 {
- return
- }
- t.pos--
- t.moveCursorToPos(t.pos)
- case keyRight:
- if t.pos == len(t.line) {
- return
- }
- t.pos++
- t.moveCursorToPos(t.pos)
- case keyHome:
- if t.pos == 0 {
- return
- }
- t.pos = 0
- t.moveCursorToPos(t.pos)
- case keyEnd:
- if t.pos == len(t.line) {
- return
- }
- t.pos = len(t.line)
- t.moveCursorToPos(t.pos)
- case keyUp:
- entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1)
- if !ok {
- return "", false
- }
- if t.historyIndex == -1 {
- t.historyPending = string(t.line)
- }
- t.historyIndex++
- runes := []rune(entry)
- t.setLine(runes, len(runes))
- case keyDown:
- switch t.historyIndex {
- case -1:
- return
- case 0:
- runes := []rune(t.historyPending)
- t.setLine(runes, len(runes))
- t.historyIndex--
- default:
- entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1)
- if ok {
- t.historyIndex--
- runes := []rune(entry)
- t.setLine(runes, len(runes))
- }
- }
- case keyEnter:
- t.moveCursorToPos(len(t.line))
- t.queue([]rune("\r\n"))
- line = string(t.line)
- ok = true
- t.line = t.line[:0]
- t.pos = 0
- t.cursorX = 0
- t.cursorY = 0
- t.maxLine = 0
- case keyDeleteWord:
- // Delete zero or more spaces and then one or more characters.
- t.eraseNPreviousChars(t.countToLeftWord())
- case keyDeleteLine:
- // Delete everything from the current cursor position to the
- // end of line.
- for i := t.pos; i < len(t.line); i++ {
- t.queue(space)
- t.advanceCursor(1)
- }
- t.line = t.line[:t.pos]
- t.moveCursorToPos(t.pos)
- case keyCtrlD:
- // Erase the character under the current position.
- // The EOF case when the line is empty is handled in
- // readLine().
- if t.pos < len(t.line) {
- t.pos++
- t.eraseNPreviousChars(1)
- }
- case keyCtrlU:
- t.eraseNPreviousChars(t.pos)
- case keyClearScreen:
- // Erases the screen and moves the cursor to the home position.
- t.queue([]rune("\x1b[2J\x1b[H"))
- t.queue(t.prompt)
- t.cursorX, t.cursorY = 0, 0
- t.advanceCursor(visualLength(t.prompt))
- t.setLine(t.line, t.pos)
- default:
- if t.AutoCompleteCallback != nil {
- prefix := string(t.line[:t.pos])
- suffix := string(t.line[t.pos:])
-
- t.lock.Unlock()
- newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key)
- t.lock.Lock()
-
- if completeOk {
- t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos]))
- return
- }
- }
- if !isPrintable(key) {
- return
- }
- if len(t.line) == maxLineLength {
- return
- }
- t.addKeyToLine(key)
- }
- return
-}
-
-// addKeyToLine inserts the given key at the current position in the current
-// line.
-func (t *Terminal) addKeyToLine(key rune) {
- if len(t.line) == cap(t.line) {
- newLine := make([]rune, len(t.line), 2*(1+len(t.line)))
- copy(newLine, t.line)
- t.line = newLine
- }
- t.line = t.line[:len(t.line)+1]
- copy(t.line[t.pos+1:], t.line[t.pos:])
- t.line[t.pos] = key
- if t.echo {
- t.writeLine(t.line[t.pos:])
- }
- t.pos++
- t.moveCursorToPos(t.pos)
-}
-
-func (t *Terminal) writeLine(line []rune) {
- for len(line) != 0 {
- remainingOnLine := t.termWidth - t.cursorX
- todo := len(line)
- if todo > remainingOnLine {
- todo = remainingOnLine
- }
- t.queue(line[:todo])
- t.advanceCursor(visualLength(line[:todo]))
- line = line[todo:]
- }
-}
-
-func (t *Terminal) Write(buf []byte) (n int, err error) {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- if t.cursorX == 0 && t.cursorY == 0 {
- // This is the easy case: there's nothing on the screen that we
- // have to move out of the way.
- return t.c.Write(buf)
- }
-
- // We have a prompt and possibly user input on the screen. We
- // have to clear it first.
- t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */)
- t.cursorX = 0
- t.clearLineToRight()
-
- for t.cursorY > 0 {
- t.move(1 /* up */, 0, 0, 0)
- t.cursorY--
- t.clearLineToRight()
- }
-
- if _, err = t.c.Write(t.outBuf); err != nil {
- return
- }
- t.outBuf = t.outBuf[:0]
-
- if n, err = t.c.Write(buf); err != nil {
- return
- }
-
- t.writeLine(t.prompt)
- if t.echo {
- t.writeLine(t.line)
- }
-
- t.moveCursorToPos(t.pos)
-
- if _, err = t.c.Write(t.outBuf); err != nil {
- return
- }
- t.outBuf = t.outBuf[:0]
- return
-}
-
-// ReadPassword temporarily changes the prompt and reads a password, without
-// echo, from the terminal.
-func (t *Terminal) ReadPassword(prompt string) (line string, err error) {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- oldPrompt := t.prompt
- t.prompt = []rune(prompt)
- t.echo = false
-
- line, err = t.readLine()
-
- t.prompt = oldPrompt
- t.echo = true
-
- return
-}
-
-// ReadLine returns a line of input from the terminal.
-func (t *Terminal) ReadLine() (line string, err error) {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- return t.readLine()
-}
-
-func (t *Terminal) readLine() (line string, err error) {
- // t.lock must be held at this point
-
- if t.cursorX == 0 && t.cursorY == 0 {
- t.writeLine(t.prompt)
- t.c.Write(t.outBuf)
- t.outBuf = t.outBuf[:0]
- }
-
- lineIsPasted := t.pasteActive
-
- for {
- rest := t.remainder
- lineOk := false
- for !lineOk {
- var key rune
- key, rest = bytesToKey(rest, t.pasteActive)
- if key == utf8.RuneError {
- break
- }
- if !t.pasteActive {
- if key == keyCtrlD {
- if len(t.line) == 0 {
- return "", io.EOF
- }
- }
- if key == keyPasteStart {
- t.pasteActive = true
- if len(t.line) == 0 {
- lineIsPasted = true
- }
- continue
- }
- } else if key == keyPasteEnd {
- t.pasteActive = false
- continue
- }
- if !t.pasteActive {
- lineIsPasted = false
- }
- line, lineOk = t.handleKey(key)
- }
- if len(rest) > 0 {
- n := copy(t.inBuf[:], rest)
- t.remainder = t.inBuf[:n]
- } else {
- t.remainder = nil
- }
- t.c.Write(t.outBuf)
- t.outBuf = t.outBuf[:0]
- if lineOk {
- if t.echo {
- t.historyIndex = -1
- t.history.Add(line)
- }
- if lineIsPasted {
- err = ErrPasteIndicator
- }
- return
- }
-
- // t.remainder is a slice at the beginning of t.inBuf
- // containing a partial key sequence
- readBuf := t.inBuf[len(t.remainder):]
- var n int
-
- t.lock.Unlock()
- n, err = t.c.Read(readBuf)
- t.lock.Lock()
-
- if err != nil {
- return
- }
-
- t.remainder = t.inBuf[:n+len(t.remainder)]
- }
-
- panic("unreachable") // for Go 1.0.
-}
-
-// SetPrompt sets the prompt to be used when reading subsequent lines.
-func (t *Terminal) SetPrompt(prompt string) {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- t.prompt = []rune(prompt)
-}
-
-func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) {
- // Move cursor to column zero at the start of the line.
- t.move(t.cursorY, 0, t.cursorX, 0)
- t.cursorX, t.cursorY = 0, 0
- t.clearLineToRight()
- for t.cursorY < numPrevLines {
- // Move down a line
- t.move(0, 1, 0, 0)
- t.cursorY++
- t.clearLineToRight()
- }
- // Move back to beginning.
- t.move(t.cursorY, 0, 0, 0)
- t.cursorX, t.cursorY = 0, 0
-
- t.queue(t.prompt)
- t.advanceCursor(visualLength(t.prompt))
- t.writeLine(t.line)
- t.moveCursorToPos(t.pos)
-}
-
-func (t *Terminal) SetSize(width, height int) error {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- if width == 0 {
- width = 1
- }
-
- oldWidth := t.termWidth
- t.termWidth, t.termHeight = width, height
-
- switch {
- case width == oldWidth:
- // If the width didn't change then nothing else needs to be
- // done.
- return nil
- case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0:
- // If there is nothing on current line and no prompt printed,
- // just do nothing
- return nil
- case width < oldWidth:
- // Some terminals (e.g. xterm) will truncate lines that were
- // too long when shinking. Others, (e.g. gnome-terminal) will
- // attempt to wrap them. For the former, repainting t.maxLine
- // works great, but that behaviour goes badly wrong in the case
- // of the latter because they have doubled every full line.
-
- // We assume that we are working on a terminal that wraps lines
- // and adjust the cursor position based on every previous line
- // wrapping and turning into two. This causes the prompt on
- // xterms to move upwards, which isn't great, but it avoids a
- // huge mess with gnome-terminal.
- if t.cursorX >= t.termWidth {
- t.cursorX = t.termWidth - 1
- }
- t.cursorY *= 2
- t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2)
- case width > oldWidth:
- // If the terminal expands then our position calculations will
- // be wrong in the future because we think the cursor is
- // |t.pos| chars into the string, but there will be a gap at
- // the end of any wrapped line.
- //
- // But the position will actually be correct until we move, so
- // we can move back to the beginning and repaint everything.
- t.clearAndRepaintLinePlusNPrevious(t.maxLine)
- }
-
- _, err := t.c.Write(t.outBuf)
- t.outBuf = t.outBuf[:0]
- return err
-}
-
-type pasteIndicatorError struct{}
-
-func (pasteIndicatorError) Error() string {
- return "terminal: ErrPasteIndicator not correctly handled"
-}
-
-// ErrPasteIndicator may be returned from ReadLine as the error, in addition
-// to valid line data. It indicates that bracketed paste mode is enabled and
-// that the returned line consists only of pasted data. Programs may wish to
-// interpret pasted data more literally than typed data.
-var ErrPasteIndicator = pasteIndicatorError{}
-
-// SetBracketedPasteMode requests that the terminal bracket paste operations
-// with markers. Not all terminals support this but, if it is supported, then
-// enabling this mode will stop any autocomplete callback from running due to
-// pastes. Additionally, any lines that are completely pasted will be returned
-// from ReadLine with the error set to ErrPasteIndicator.
-func (t *Terminal) SetBracketedPasteMode(on bool) {
- if on {
- io.WriteString(t.c, "\x1b[?2004h")
- } else {
- io.WriteString(t.c, "\x1b[?2004l")
- }
-}
-
-// stRingBuffer is a ring buffer of strings.
-type stRingBuffer struct {
- // entries contains max elements.
- entries []string
- max int
- // head contains the index of the element most recently added to the ring.
- head int
- // size contains the number of elements in the ring.
- size int
-}
-
-func (s *stRingBuffer) Add(a string) {
- if s.entries == nil {
- const defaultNumEntries = 100
- s.entries = make([]string, defaultNumEntries)
- s.max = defaultNumEntries
- }
-
- s.head = (s.head + 1) % s.max
- s.entries[s.head] = a
- if s.size < s.max {
- s.size++
- }
-}
-
-// NthPreviousEntry returns the value passed to the nth previous call to Add.
-// If n is zero then the immediately prior value is returned, if one, then the
-// next most recent, and so on. If such an element doesn't exist then ok is
-// false.
-func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) {
- if n >= s.size {
- return "", false
- }
- index := s.head - n
- if index < 0 {
- index += s.max
- }
- return s.entries[index], true
-}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go
deleted file mode 100644
index 0763c9a..0000000
--- a/vendor/golang.org/x/crypto/ssh/terminal/util.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd
-
-// Package terminal provides support functions for dealing with terminals, as
-// commonly found on UNIX systems.
-//
-// Putting a terminal into raw mode is the most common requirement:
-//
-// oldState, err := terminal.MakeRaw(0)
-// if err != nil {
-// panic(err)
-// }
-// defer terminal.Restore(0, oldState)
-package terminal
-
-import (
- "io"
- "syscall"
- "unsafe"
-)
-
-// State contains the state of a terminal.
-type State struct {
- termios syscall.Termios
-}
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal(fd int) bool {
- var termios syscall.Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
-}
-
-// MakeRaw put the terminal connected to the given file descriptor into raw
-// mode and returns the previous state of the terminal so that it can be
-// restored.
-func MakeRaw(fd int) (*State, error) {
- var oldState State
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
- return nil, err
- }
-
- newState := oldState.termios
- newState.Iflag &^= syscall.ISTRIP | syscall.INLCR | syscall.ICRNL | syscall.IGNCR | syscall.IXON | syscall.IXOFF
- newState.Lflag &^= syscall.ECHO | syscall.ICANON | syscall.ISIG
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
- return nil, err
- }
-
- return &oldState, nil
-}
-
-// GetState returns the current state of a terminal which may be useful to
-// restore the terminal after a signal.
-func GetState(fd int) (*State, error) {
- var oldState State
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
- return nil, err
- }
-
- return &oldState, nil
-}
-
-// Restore restores the terminal connected to the given file descriptor to a
-// previous state.
-func Restore(fd int, state *State) error {
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0)
- return err
-}
-
-// GetSize returns the dimensions of the given terminal.
-func GetSize(fd int) (width, height int, err error) {
- var dimensions [4]uint16
-
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 {
- return -1, -1, err
- }
- return int(dimensions[1]), int(dimensions[0]), nil
-}
-
-// ReadPassword reads a line of input from a terminal without local echo. This
-// is commonly used for inputting passwords and other sensitive data. The slice
-// returned does not include the \n.
-func ReadPassword(fd int) ([]byte, error) {
- var oldState syscall.Termios
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 {
- return nil, err
- }
-
- newState := oldState
- newState.Lflag &^= syscall.ECHO
- newState.Lflag |= syscall.ICANON | syscall.ISIG
- newState.Iflag |= syscall.ICRNL
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
- return nil, err
- }
-
- defer func() {
- syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0)
- }()
-
- var buf [16]byte
- var ret []byte
- for {
- n, err := syscall.Read(fd, buf[:])
- if err != nil {
- return nil, err
- }
- if n == 0 {
- if len(ret) == 0 {
- return nil, io.EOF
- }
- break
- }
- if buf[n-1] == '\n' {
- n--
- }
- ret = append(ret, buf[:n]...)
- if n < len(buf) {
- break
- }
- }
-
- return ret, nil
-}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go
deleted file mode 100644
index 9c1ffd1..0000000
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd netbsd openbsd
-
-package terminal
-
-import "syscall"
-
-const ioctlReadTermios = syscall.TIOCGETA
-const ioctlWriteTermios = syscall.TIOCSETA
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go
deleted file mode 100644
index 5883b22..0000000
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package terminal
-
-// These constants are declared here, rather than importing
-// them from the syscall package as some syscall packages, even
-// on linux, for example gccgo, do not declare them.
-const ioctlReadTermios = 0x5401 // syscall.TCGETS
-const ioctlWriteTermios = 0x5402 // syscall.TCSETS
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
deleted file mode 100644
index 2dd6c3d..0000000
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows
-
-// Package terminal provides support functions for dealing with terminals, as
-// commonly found on UNIX systems.
-//
-// Putting a terminal into raw mode is the most common requirement:
-//
-// oldState, err := terminal.MakeRaw(0)
-// if err != nil {
-// panic(err)
-// }
-// defer terminal.Restore(0, oldState)
-package terminal
-
-import (
- "io"
- "syscall"
- "unsafe"
-)
-
-const (
- enableLineInput = 2
- enableEchoInput = 4
- enableProcessedInput = 1
- enableWindowInput = 8
- enableMouseInput = 16
- enableInsertMode = 32
- enableQuickEditMode = 64
- enableExtendedFlags = 128
- enableAutoPosition = 256
- enableProcessedOutput = 1
- enableWrapAtEolOutput = 2
-)
-
-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
-
-var (
- procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
- procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
- procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
-)
-
-type (
- short int16
- word uint16
-
- coord struct {
- x short
- y short
- }
- smallRect struct {
- left short
- top short
- right short
- bottom short
- }
- consoleScreenBufferInfo struct {
- size coord
- cursorPosition coord
- attributes word
- window smallRect
- maximumWindowSize coord
- }
-)
-
-type State struct {
- mode uint32
-}
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal(fd int) bool {
- var st uint32
- r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- return r != 0 && e == 0
-}
-
-// MakeRaw put the terminal connected to the given file descriptor into raw
-// mode and returns the previous state of the terminal so that it can be
-// restored.
-func MakeRaw(fd int) (*State, error) {
- var st uint32
- _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- if e != 0 {
- return nil, error(e)
- }
- st &^= (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput)
- _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)
- if e != 0 {
- return nil, error(e)
- }
- return &State{st}, nil
-}
-
-// GetState returns the current state of a terminal which may be useful to
-// restore the terminal after a signal.
-func GetState(fd int) (*State, error) {
- var st uint32
- _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- if e != 0 {
- return nil, error(e)
- }
- return &State{st}, nil
-}
-
-// Restore restores the terminal connected to the given file descriptor to a
-// previous state.
-func Restore(fd int, state *State) error {
- _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0)
- return err
-}
-
-// GetSize returns the dimensions of the given terminal.
-func GetSize(fd int) (width, height int, err error) {
- var info consoleScreenBufferInfo
- _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0)
- if e != 0 {
- return 0, 0, error(e)
- }
- return int(info.size.x), int(info.size.y), nil
-}
-
-// ReadPassword reads a line of input from a terminal without local echo. This
-// is commonly used for inputting passwords and other sensitive data. The slice
-// returned does not include the \n.
-func ReadPassword(fd int) ([]byte, error) {
- var st uint32
- _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- if e != 0 {
- return nil, error(e)
- }
- old := st
-
- st &^= (enableEchoInput)
- st |= (enableProcessedInput | enableLineInput | enableProcessedOutput)
- _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)
- if e != 0 {
- return nil, error(e)
- }
-
- defer func() {
- syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0)
- }()
-
- var buf [16]byte
- var ret []byte
- for {
- n, err := syscall.Read(syscall.Handle(fd), buf[:])
- if err != nil {
- return nil, err
- }
- if n == 0 {
- if len(ret) == 0 {
- return nil, io.EOF
- }
- break
- }
- if buf[n-1] == '\n' {
- n--
- }
- if n > 0 && buf[n-1] == '\r' {
- n--
- }
- ret = append(ret, buf[:n]...)
- if n < len(buf) {
- break
- }
- }
-
- return ret, nil
-}
diff --git a/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json b/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json
deleted file mode 100644
index 4058ba6..0000000
--- a/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json
+++ /dev/null
@@ -1,2209 +0,0 @@
-{
- "kind": "discovery#restDescription",
- "etag": "\"ye6orv2F-1npMW3u9suM3a7C5Bo/n2LVhGPabQO3DmbKxkomJprJEEo\"",
- "discoveryVersion": "v1",
- "id": "bigquery:v2",
- "name": "bigquery",
- "version": "v2",
- "revision": "20141112",
- "title": "BigQuery API",
- "description": "A data platform for customers to create, manage, share and query data.",
- "ownerDomain": "google.com",
- "ownerName": "Google",
- "icons": {
- "x16": "https://www.google.com/images/icons/product/search-16.gif",
- "x32": "https://www.google.com/images/icons/product/search-32.gif"
- },
- "documentationLink": "https://cloud.google.com/bigquery/",
- "protocol": "rest",
- "baseUrl": "https://www.googleapis.com/bigquery/v2/",
- "basePath": "/bigquery/v2/",
- "rootUrl": "https://www.googleapis.com/",
- "servicePath": "bigquery/v2/",
- "batchPath": "batch",
- "parameters": {
- "alt": {
- "type": "string",
- "description": "Data format for the response.",
- "default": "json",
- "enum": [
- "csv",
- "json"
- ],
- "enumDescriptions": [
- "Responses with Content-Type of text/csv",
- "Responses with Content-Type of application/json"
- ],
- "location": "query"
- },
- "fields": {
- "type": "string",
- "description": "Selector specifying which fields to include in a partial response.",
- "location": "query"
- },
- "key": {
- "type": "string",
- "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
- "location": "query"
- },
- "oauth_token": {
- "type": "string",
- "description": "OAuth 2.0 token for the current user.",
- "location": "query"
- },
- "prettyPrint": {
- "type": "boolean",
- "description": "Returns response with indentations and line breaks.",
- "default": "true",
- "location": "query"
- },
- "quotaUser": {
- "type": "string",
- "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.",
- "location": "query"
- },
- "userIp": {
- "type": "string",
- "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.",
- "location": "query"
- }
- },
- "auth": {
- "oauth2": {
- "scopes": {
- "https://www.googleapis.com/auth/bigquery": {
- "description": "View and manage your data in Google BigQuery"
- },
- "https://www.googleapis.com/auth/bigquery.insertdata": {
- "description": "Insert data into Google BigQuery"
- },
- "https://www.googleapis.com/auth/cloud-platform": {
- "description": "View and manage your data across Google Cloud Platform services"
- },
- "https://www.googleapis.com/auth/devstorage.full_control": {
- "description": "Manage your data and permissions in Google Cloud Storage"
- },
- "https://www.googleapis.com/auth/devstorage.read_only": {
- "description": "View your data in Google Cloud Storage"
- },
- "https://www.googleapis.com/auth/devstorage.read_write": {
- "description": "Manage your data in Google Cloud Storage"
- }
- }
- }
- },
- "schemas": {
- "CsvOptions": {
- "id": "CsvOptions",
- "type": "object",
- "properties": {
- "allowJaggedRows": {
- "type": "boolean",
- "description": "[Optional] Indicates if BigQuery should accept rows that are missing trailing optional columns. If true, BigQuery treats missing trailing columns as null values. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false."
- },
- "allowQuotedNewlines": {
- "type": "boolean",
- "description": "[Optional] Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false."
- },
- "encoding": {
- "type": "string",
- "description": "[Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties."
- },
- "fieldDelimiter": {
- "type": "string",
- "description": "[Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence \"\\t\" to specify a tab separator. The default value is a comma (',')."
- },
- "quote": {
- "type": "string",
- "description": "[Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('\"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true."
- },
- "skipLeadingRows": {
- "type": "integer",
- "description": "[Optional] The number of rows at the top of a CSV file that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.",
- "format": "int32"
- }
- }
- },
- "Dataset": {
- "id": "Dataset",
- "type": "object",
- "properties": {
- "access": {
- "type": "array",
- "description": "[Optional] An array of objects that define dataset access for one or more entities. You can set this property when inserting or updating a dataset in order to control who is allowed to access the data. If unspecified at dataset creation time, BigQuery adds default dataset access for the following entities: access.specialGroup: projectReaders; access.role: READER; access.specialGroup: projectWriters; access.role: WRITER; access.specialGroup: projectOwners; access.role: OWNER; access.userByEmail: [dataset creator email]; access.role: OWNER;",
- "items": {
- "type": "object",
- "properties": {
- "domain": {
- "type": "string",
- "description": "[Pick one] A domain to grant access to. Any users signed in with the domain specified will be granted the specified access. Example: \"example.com\"."
- },
- "groupByEmail": {
- "type": "string",
- "description": "[Pick one] An email address of a Google Group to grant access to."
- },
- "role": {
- "type": "string",
- "description": "[Required] Describes the rights granted to the user specified by the other member of the access object. The following string values are supported: READER, WRITER, OWNER."
- },
- "specialGroup": {
- "type": "string",
- "description": "[Pick one] A special group to grant access to. Possible values include: projectOwners: Owners of the enclosing project. projectReaders: Readers of the enclosing project. projectWriters: Writers of the enclosing project. allAuthenticatedUsers: All authenticated BigQuery users."
- },
- "userByEmail": {
- "type": "string",
- "description": "[Pick one] An email address of a user to grant access to. For example: fred@example.com."
- },
- "view": {
- "$ref": "TableReference",
- "description": "[Pick one] A view from a different dataset to grant access to. Queries executed against that view will have read access to tables in this dataset. The role field is not required when this field is set. If that view is updated by any user, access to the view needs to be granted again via an update operation."
- }
- }
- }
- },
- "creationTime": {
- "type": "string",
- "description": "[Output-only] The time when this dataset was created, in milliseconds since the epoch.",
- "format": "int64"
- },
- "datasetReference": {
- "$ref": "DatasetReference",
- "description": "[Required] A reference that identifies the dataset."
- },
- "defaultTableExpirationMs": {
- "type": "string",
- "description": "[Experimental] The default lifetime of all tables in the dataset, in milliseconds. The minimum value is 3600000 milliseconds (one hour). Once this property is set, all newly-created tables in the dataset will have an expirationTime property set to the creation time plus the value in this property, and changing the value will only affect new tables, not existing ones. When the expirationTime for a given table is reached, that table will be deleted automatically. If a table's expirationTime is modified or removed before the table expires, or if you provide an explicit expirationTime when creating a table, that value takes precedence over the default expiration time indicated by this property.",
- "format": "int64"
- },
- "description": {
- "type": "string",
- "description": "[Optional] A user-friendly description of the dataset."
- },
- "etag": {
- "type": "string",
- "description": "[Output-only] A hash of the resource."
- },
- "friendlyName": {
- "type": "string",
- "description": "[Optional] A descriptive name for the dataset."
- },
- "id": {
- "type": "string",
- "description": "[Output-only] The fully-qualified unique name of the dataset in the format projectId:datasetId. The dataset name without the project name is given in the datasetId field. When creating a new dataset, leave this field blank, and instead specify the datasetId field."
- },
- "kind": {
- "type": "string",
- "description": "[Output-only] The resource type.",
- "default": "bigquery#dataset"
- },
- "lastModifiedTime": {
- "type": "string",
- "description": "[Output-only] The date when this dataset or any of its tables was last modified, in milliseconds since the epoch.",
- "format": "int64"
- },
- "location": {
- "type": "string",
- "description": "[Experimental] The location where the data resides. If not present, the data will be stored in the US."
- },
- "selfLink": {
- "type": "string",
- "description": "[Output-only] A URL that can be used to access the resource again. You can use this URL in Get or Update requests to the resource."
- }
- }
- },
- "DatasetList": {
- "id": "DatasetList",
- "type": "object",
- "properties": {
- "datasets": {
- "type": "array",
- "description": "An array of the dataset resources in the project. Each resource contains basic information. For full information about a particular dataset resource, use the Datasets: get method. This property is omitted when there are no datasets in the project.",
- "items": {
- "type": "object",
- "properties": {
- "datasetReference": {
- "$ref": "DatasetReference",
- "description": "The dataset reference. Use this property to access specific parts of the dataset's ID, such as project ID or dataset ID."
- },
- "friendlyName": {
- "type": "string",
- "description": "A descriptive name for the dataset, if one exists."
- },
- "id": {
- "type": "string",
- "description": "The fully-qualified, unique, opaque ID of the dataset."
- },
- "kind": {
- "type": "string",
- "description": "The resource type. This property always returns the value \"bigquery#dataset\".",
- "default": "bigquery#dataset"
- }
- }
- }
- },
- "etag": {
- "type": "string",
- "description": "A hash value of the results page. You can use this property to determine if the page has changed since the last request."
- },
- "kind": {
- "type": "string",
- "description": "The list type. This property always returns the value \"bigquery#datasetList\".",
- "default": "bigquery#datasetList"
- },
- "nextPageToken": {
- "type": "string",
- "description": "A token that can be used to request the next results page. This property is omitted on the final results page."
- }
- }
- },
- "DatasetReference": {
- "id": "DatasetReference",
- "type": "object",
- "properties": {
- "datasetId": {
- "type": "string",
- "description": "[Required] A unique ID for this dataset, without the project name. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.",
- "annotations": {
- "required": [
- "bigquery.datasets.update"
- ]
- }
- },
- "projectId": {
- "type": "string",
- "description": "[Optional] The ID of the project containing this dataset.",
- "annotations": {
- "required": [
- "bigquery.datasets.update"
- ]
- }
- }
- }
- },
- "ErrorProto": {
- "id": "ErrorProto",
- "type": "object",
- "properties": {
- "debugInfo": {
- "type": "string",
- "description": "Debugging information. This property is internal to Google and should not be used."
- },
- "location": {
- "type": "string",
- "description": "Specifies where the error occurred, if present."
- },
- "message": {
- "type": "string",
- "description": "A human-readable description of the error."
- },
- "reason": {
- "type": "string",
- "description": "A short error code that summarizes the error."
- }
- }
- },
- "ExternalDataConfiguration": {
- "id": "ExternalDataConfiguration",
- "type": "object",
- "properties": {
- "compression": {
- "type": "string",
- "description": "[Optional] The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE."
- },
- "csvOptions": {
- "$ref": "CsvOptions",
- "description": "Additional properties to set if sourceFormat is set to CSV."
- },
- "ignoreUnknownValues": {
- "type": "boolean",
- "description": "[Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns"
- },
- "maxBadRecords": {
- "type": "integer",
- "description": "[Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.",
- "format": "int32"
- },
- "schema": {
- "$ref": "TableSchema",
- "description": "[Required] The schema for the data."
- },
- "sourceFormat": {
- "type": "string",
- "description": "[Optional] The data format. External data sources must be in CSV format. The default value is CSV."
- },
- "sourceUris": {
- "type": "array",
- "description": "[Required] The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. CSV limits related to load jobs apply to external data sources, plus an additional limit of 10 GB maximum size across all URIs.",
- "items": {
- "type": "string"
- }
- }
- }
- },
- "GetQueryResultsResponse": {
- "id": "GetQueryResultsResponse",
- "type": "object",
- "properties": {
- "cacheHit": {
- "type": "boolean",
- "description": "Whether the query result was fetched from the query cache."
- },
- "etag": {
- "type": "string",
- "description": "A hash of this response."
- },
- "jobComplete": {
- "type": "boolean",
- "description": "Whether the query has completed or not. If rows or totalRows are present, this will always be true. If this is false, totalRows will not be available."
- },
- "jobReference": {
- "$ref": "JobReference",
- "description": "Reference to the BigQuery Job that was created to run the query. This field will be present even if the original request timed out, in which case GetQueryResults can be used to read the results once the query has completed. Since this API only returns the first page of results, subsequent pages can be fetched via the same mechanism (GetQueryResults)."
- },
- "kind": {
- "type": "string",
- "description": "The resource type of the response.",
- "default": "bigquery#getQueryResultsResponse"
- },
- "pageToken": {
- "type": "string",
- "description": "A token used for paging results."
- },
- "rows": {
- "type": "array",
- "description": "An object with as many results as can be contained within the maximum permitted reply size. To get any additional rows, you can call GetQueryResults and specify the jobReference returned above. Present only when the query completes successfully.",
- "items": {
- "$ref": "TableRow"
- }
- },
- "schema": {
- "$ref": "TableSchema",
- "description": "The schema of the results. Present only when the query completes successfully."
- },
- "totalBytesProcessed": {
- "type": "string",
- "description": "The total number of bytes processed for this query.",
- "format": "int64"
- },
- "totalRows": {
- "type": "string",
- "description": "The total number of rows in the complete query result set, which can be more than the number of rows in this single page of results. Present only when the query completes successfully.",
- "format": "uint64"
- }
- }
- },
- "Job": {
- "id": "Job",
- "type": "object",
- "properties": {
- "configuration": {
- "$ref": "JobConfiguration",
- "description": "[Required] Describes the job configuration."
- },
- "etag": {
- "type": "string",
- "description": "[Output-only] A hash of this resource."
- },
- "id": {
- "type": "string",
- "description": "[Output-only] Opaque ID field of the job"
- },
- "jobReference": {
- "$ref": "JobReference",
- "description": "[Optional] Reference describing the unique-per-user name of the job."
- },
- "kind": {
- "type": "string",
- "description": "[Output-only] The type of the resource.",
- "default": "bigquery#job"
- },
- "selfLink": {
- "type": "string",
- "description": "[Output-only] A URL that can be used to access this resource again."
- },
- "statistics": {
- "$ref": "JobStatistics",
- "description": "[Output-only] Information about the job, including starting time and ending time of the job."
- },
- "status": {
- "$ref": "JobStatus",
- "description": "[Output-only] The status of this job. Examine this value when polling an asynchronous job to see if the job is complete."
- },
- "user_email": {
- "type": "string",
- "description": "[Output-only] Email address of the user who ran the job."
- }
- }
- },
- "JobConfiguration": {
- "id": "JobConfiguration",
- "type": "object",
- "properties": {
- "copy": {
- "$ref": "JobConfigurationTableCopy",
- "description": "[Pick one] Copies a table."
- },
- "dryRun": {
- "type": "boolean",
- "description": "[Optional] If set, don't actually run this job. A valid query will return a mostly empty response with some processing statistics, while an invalid query will return the same error it would if it wasn't a dry run. Behavior of non-query jobs is undefined."
- },
- "extract": {
- "$ref": "JobConfigurationExtract",
- "description": "[Pick one] Configures an extract job."
- },
- "link": {
- "$ref": "JobConfigurationLink",
- "description": "[Pick one] Configures a link job."
- },
- "load": {
- "$ref": "JobConfigurationLoad",
- "description": "[Pick one] Configures a load job."
- },
- "query": {
- "$ref": "JobConfigurationQuery",
- "description": "[Pick one] Configures a query job."
- }
- }
- },
- "JobConfigurationExtract": {
- "id": "JobConfigurationExtract",
- "type": "object",
- "properties": {
- "compression": {
- "type": "string",
- "description": "[Optional] The compression type to use for exported files. Possible values include GZIP and NONE. The default value is NONE."
- },
- "destinationFormat": {
- "type": "string",
- "description": "[Optional] The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO. The default value is CSV. Tables with nested or repeated fields cannot be exported as CSV."
- },
- "destinationUri": {
- "type": "string",
- "description": "[Pick one] DEPRECATED: Use destinationUris instead, passing only one URI as necessary. The fully-qualified Google Cloud Storage URI where the extracted table should be written."
- },
- "destinationUris": {
- "type": "array",
- "description": "[Pick one] A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.",
- "items": {
- "type": "string"
- }
- },
- "fieldDelimiter": {
- "type": "string",
- "description": "[Optional] Delimiter to use between fields in the exported data. Default is ','"
- },
- "printHeader": {
- "type": "boolean",
- "description": "[Optional] Whether to print out a header row in the results. Default is true."
- },
- "sourceTable": {
- "$ref": "TableReference",
- "description": "[Required] A reference to the table being exported."
- }
- }
- },
- "JobConfigurationLink": {
- "id": "JobConfigurationLink",
- "type": "object",
- "properties": {
- "createDisposition": {
- "type": "string",
- "description": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion."
- },
- "destinationTable": {
- "$ref": "TableReference",
- "description": "[Required] The destination table of the link job."
- },
- "sourceUri": {
- "type": "array",
- "description": "[Required] URI of source table to link.",
- "items": {
- "type": "string"
- }
- },
- "writeDisposition": {
- "type": "string",
- "description": "[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion."
- }
- }
- },
- "JobConfigurationLoad": {
- "id": "JobConfigurationLoad",
- "type": "object",
- "properties": {
- "allowJaggedRows": {
- "type": "boolean",
- "description": "[Optional] Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats."
- },
- "allowQuotedNewlines": {
- "type": "boolean",
- "description": "Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false."
- },
- "createDisposition": {
- "type": "string",
- "description": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion."
- },
- "destinationTable": {
- "$ref": "TableReference",
- "description": "[Required] The destination table to load the data into."
- },
- "encoding": {
- "type": "string",
- "description": "[Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties."
- },
- "fieldDelimiter": {
- "type": "string",
- "description": "[Optional] The separator for fields in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence \"\\t\" to specify a tab separator. The default value is a comma (',')."
- },
- "ignoreUnknownValues": {
- "type": "boolean",
- "description": "[Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names"
- },
- "maxBadRecords": {
- "type": "integer",
- "description": "[Optional] The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.",
- "format": "int32"
- },
- "projectionFields": {
- "type": "array",
- "description": "[Experimental] If sourceFormat is set to \"DATASTORE_BACKUP\", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.",
- "items": {
- "type": "string"
- }
- },
- "quote": {
- "type": "string",
- "description": "[Optional] The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. The default value is a double-quote ('\"'). If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true."
- },
- "schema": {
- "$ref": "TableSchema",
- "description": "[Optional] The schema for the destination table. The schema can be omitted if the destination table already exists or if the schema can be inferred from the loaded data."
- },
- "schemaInline": {
- "type": "string",
- "description": "[Deprecated] The inline schema. For CSV schemas, specify as \"Field1:Type1[,Field2:Type2]*\". For example, \"foo:STRING, bar:INTEGER, baz:FLOAT\"."
- },
- "schemaInlineFormat": {
- "type": "string",
- "description": "[Deprecated] The format of the schemaInline property."
- },
- "skipLeadingRows": {
- "type": "integer",
- "description": "[Optional] The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.",
- "format": "int32"
- },
- "sourceFormat": {
- "type": "string",
- "description": "[Optional] The format of the data files. For CSV files, specify \"CSV\". For datastore backups, specify \"DATASTORE_BACKUP\". For newline-delimited JSON, specify \"NEWLINE_DELIMITED_JSON\". The default value is CSV."
- },
- "sourceUris": {
- "type": "array",
- "description": "[Required] The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name.",
- "items": {
- "type": "string"
- }
- },
- "writeDisposition": {
- "type": "string",
- "description": "[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion."
- }
- }
- },
- "JobConfigurationQuery": {
- "id": "JobConfigurationQuery",
- "type": "object",
- "properties": {
- "allowLargeResults": {
- "type": "boolean",
- "description": "If true, allows the query to produce arbitrarily large result tables at a slight cost in performance. Requires destinationTable to be set."
- },
- "createDisposition": {
- "type": "string",
- "description": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion."
- },
- "defaultDataset": {
- "$ref": "DatasetReference",
- "description": "[Optional] Specifies the default dataset to use for unqualified table names in the query."
- },
- "destinationTable": {
- "$ref": "TableReference",
- "description": "[Optional] Describes the table where the query results should be stored. If not present, a new table will be created to store the results."
- },
- "flattenResults": {
- "type": "boolean",
- "description": "[Optional] Flattens all nested and repeated fields in the query results. The default value is true. allowLargeResults must be true if this is set to false."
- },
- "preserveNulls": {
- "type": "boolean",
- "description": "[Deprecated] This property is deprecated."
- },
- "priority": {
- "type": "string",
- "description": "[Optional] Specifies a priority for the query. Possible values include INTERACTIVE and BATCH. The default value is INTERACTIVE."
- },
- "query": {
- "type": "string",
- "description": "[Required] BigQuery SQL query to execute."
- },
- "tableDefinitions": {
- "type": "object",
- "description": "[Experimental] If querying an external data source outside of BigQuery, describes the data format, location and other properties of the data source. By defining these properties, the data source can then be queried as if it were a standard BigQuery table.",
- "additionalProperties": {
- "$ref": "ExternalDataConfiguration"
- }
- },
- "useQueryCache": {
- "type": "boolean",
- "description": "[Optional] Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified."
- },
- "writeDisposition": {
- "type": "string",
- "description": "[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion."
- }
- }
- },
- "JobConfigurationTableCopy": {
- "id": "JobConfigurationTableCopy",
- "type": "object",
- "properties": {
- "createDisposition": {
- "type": "string",
- "description": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion."
- },
- "destinationTable": {
- "$ref": "TableReference",
- "description": "[Required] The destination table"
- },
- "sourceTable": {
- "$ref": "TableReference",
- "description": "[Pick one] Source table to copy."
- },
- "sourceTables": {
- "type": "array",
- "description": "[Pick one] Source tables to copy.",
- "items": {
- "$ref": "TableReference"
- }
- },
- "writeDisposition": {
- "type": "string",
- "description": "[Optional] Specifies the action that occurs if the destination table already exists. The following values are supported: WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data. WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion."
- }
- }
- },
- "JobList": {
- "id": "JobList",
- "type": "object",
- "properties": {
- "etag": {
- "type": "string",
- "description": "A hash of this page of results."
- },
- "jobs": {
- "type": "array",
- "description": "List of jobs that were requested.",
- "items": {
- "type": "object",
- "properties": {
- "configuration": {
- "$ref": "JobConfiguration",
- "description": "[Full-projection-only] Specifies the job configuration."
- },
- "errorResult": {
- "$ref": "ErrorProto",
- "description": "A result object that will be present only if the job has failed."
- },
- "id": {
- "type": "string",
- "description": "Unique opaque ID of the job."
- },
- "jobReference": {
- "$ref": "JobReference",
- "description": "Job reference uniquely identifying the job."
- },
- "kind": {
- "type": "string",
- "description": "The resource type.",
- "default": "bigquery#job"
- },
- "state": {
- "type": "string",
- "description": "Running state of the job. When the state is DONE, errorResult can be checked to determine whether the job succeeded or failed."
- },
- "statistics": {
- "$ref": "JobStatistics",
- "description": "[Output-only] Information about the job, including starting time and ending time of the job."
- },
- "status": {
- "$ref": "JobStatus",
- "description": "[Full-projection-only] Describes the state of the job."
- },
- "user_email": {
- "type": "string",
- "description": "[Full-projection-only] Email address of the user who ran the job."
- }
- }
- }
- },
- "kind": {
- "type": "string",
- "description": "The resource type of the response.",
- "default": "bigquery#jobList"
- },
- "nextPageToken": {
- "type": "string",
- "description": "A token to request the next page of results."
- },
- "totalItems": {
- "type": "integer",
- "description": "Total number of jobs in this collection.",
- "format": "int32"
- }
- }
- },
- "JobReference": {
- "id": "JobReference",
- "type": "object",
- "properties": {
- "jobId": {
- "type": "string",
- "description": "[Required] The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.",
- "annotations": {
- "required": [
- "bigquery.jobs.getQueryResults"
- ]
- }
- },
- "projectId": {
- "type": "string",
- "description": "[Required] The ID of the project containing this job.",
- "annotations": {
- "required": [
- "bigquery.jobs.getQueryResults"
- ]
- }
- }
- }
- },
- "JobStatistics": {
- "id": "JobStatistics",
- "type": "object",
- "properties": {
- "creationTime": {
- "type": "string",
- "description": "[Output-only] Creation time of this job, in milliseconds since the epoch. This field will be present on all jobs.",
- "format": "int64"
- },
- "endTime": {
- "type": "string",
- "description": "[Output-only] End time of this job, in milliseconds since the epoch. This field will be present whenever a job is in the DONE state.",
- "format": "int64"
- },
- "extract": {
- "$ref": "JobStatistics4",
- "description": "[Output-only] Statistics for an extract job."
- },
- "load": {
- "$ref": "JobStatistics3",
- "description": "[Output-only] Statistics for a load job."
- },
- "query": {
- "$ref": "JobStatistics2",
- "description": "[Output-only] Statistics for a query job."
- },
- "startTime": {
- "type": "string",
- "description": "[Output-only] Start time of this job, in milliseconds since the epoch. This field will be present when the job transitions from the PENDING state to either RUNNING or DONE.",
- "format": "int64"
- },
- "totalBytesProcessed": {
- "type": "string",
- "description": "[Output-only] [Deprecated] Use the bytes processed in the query statistics instead.",
- "format": "int64"
- }
- }
- },
- "JobStatistics2": {
- "id": "JobStatistics2",
- "type": "object",
- "properties": {
- "cacheHit": {
- "type": "boolean",
- "description": "[Output-only] Whether the query result was fetched from the query cache."
- },
- "totalBytesProcessed": {
- "type": "string",
- "description": "[Output-only] Total bytes processed for this job.",
- "format": "int64"
- }
- }
- },
- "JobStatistics3": {
- "id": "JobStatistics3",
- "type": "object",
- "properties": {
- "inputFileBytes": {
- "type": "string",
- "description": "[Output-only] Number of bytes of source data in a joad job.",
- "format": "int64"
- },
- "inputFiles": {
- "type": "string",
- "description": "[Output-only] Number of source files in a load job.",
- "format": "int64"
- },
- "outputBytes": {
- "type": "string",
- "description": "[Output-only] Size of the loaded data in bytes. Note that while an import job is in the running state, this value may change.",
- "format": "int64"
- },
- "outputRows": {
- "type": "string",
- "description": "[Output-only] Number of rows imported in a load job. Note that while an import job is in the running state, this value may change.",
- "format": "int64"
- }
- }
- },
- "JobStatistics4": {
- "id": "JobStatistics4",
- "type": "object",
- "properties": {
- "destinationUriFileCounts": {
- "type": "array",
- "description": "[Experimental] Number of files per destination URI or URI pattern specified in the extract configuration. These values will be in the same order as the URIs specified in the 'destinationUris' field.",
- "items": {
- "type": "string",
- "format": "int64"
- }
- }
- }
- },
- "JobStatus": {
- "id": "JobStatus",
- "type": "object",
- "properties": {
- "errorResult": {
- "$ref": "ErrorProto",
- "description": "[Output-only] Final error result of the job. If present, indicates that the job has completed and was unsuccessful."
- },
- "errors": {
- "type": "array",
- "description": "[Output-only] All errors encountered during the running of the job. Errors here do not necessarily mean that the job has completed or was unsuccessful.",
- "items": {
- "$ref": "ErrorProto"
- }
- },
- "state": {
- "type": "string",
- "description": "[Output-only] Running state of the job."
- }
- }
- },
- "JsonObject": {
- "id": "JsonObject",
- "type": "object",
- "description": "Represents a single JSON object.",
- "additionalProperties": {
- "$ref": "JsonValue"
- }
- },
- "JsonValue": {
- "id": "JsonValue",
- "type": "any"
- },
- "ProjectList": {
- "id": "ProjectList",
- "type": "object",
- "properties": {
- "etag": {
- "type": "string",
- "description": "A hash of the page of results"
- },
- "kind": {
- "type": "string",
- "description": "The type of list.",
- "default": "bigquery#projectList"
- },
- "nextPageToken": {
- "type": "string",
- "description": "A token to request the next page of results."
- },
- "projects": {
- "type": "array",
- "description": "Projects to which you have at least READ access.",
- "items": {
- "type": "object",
- "properties": {
- "friendlyName": {
- "type": "string",
- "description": "A descriptive name for this project."
- },
- "id": {
- "type": "string",
- "description": "An opaque ID of this project."
- },
- "kind": {
- "type": "string",
- "description": "The resource type.",
- "default": "bigquery#project"
- },
- "numericId": {
- "type": "string",
- "description": "The numeric ID of this project.",
- "format": "uint64"
- },
- "projectReference": {
- "$ref": "ProjectReference",
- "description": "A unique reference to this project."
- }
- }
- }
- },
- "totalItems": {
- "type": "integer",
- "description": "The total number of projects in the list.",
- "format": "int32"
- }
- }
- },
- "ProjectReference": {
- "id": "ProjectReference",
- "type": "object",
- "properties": {
- "projectId": {
- "type": "string",
- "description": "[Required] ID of the project. Can be either the numeric ID or the assigned ID of the project."
- }
- }
- },
- "QueryRequest": {
- "id": "QueryRequest",
- "type": "object",
- "properties": {
- "defaultDataset": {
- "$ref": "DatasetReference",
- "description": "[Optional] Specifies the default datasetId and projectId to assume for any unqualified table names in the query. If not set, all table names in the query string must be qualified in the format 'datasetId.tableId'."
- },
- "dryRun": {
- "type": "boolean",
- "description": "[Optional] If set, don't actually run this job. A valid query will return a mostly empty response with some processing statistics, while an invalid query will return the same error it would if it wasn't a dry run."
- },
- "kind": {
- "type": "string",
- "description": "The resource type of the request.",
- "default": "bigquery#queryRequest"
- },
- "maxResults": {
- "type": "integer",
- "description": "[Optional] The maximum number of rows of data to return per page of results. Setting this flag to a small value such as 1000 and then paging through results might improve reliability when the query result set is large. In addition to this limit, responses are also limited to 10 MB. By default, there is no maximum row count, and only the byte limit applies.",
- "format": "uint32"
- },
- "preserveNulls": {
- "type": "boolean",
- "description": "[Deprecated] This property is deprecated."
- },
- "query": {
- "type": "string",
- "description": "[Required] A query string, following the BigQuery query syntax, of the query to execute. Example: \"SELECT count(f1) FROM [myProjectId:myDatasetId.myTableId]\".",
- "annotations": {
- "required": [
- "bigquery.jobs.query"
- ]
- }
- },
- "timeoutMs": {
- "type": "integer",
- "description": "[Optional] How long to wait for the query to complete, in milliseconds, before the request times out and returns. Note that this is only a timeout for the request, not the query. If the query takes longer to run than the timeout value, the call returns without any results and with the 'jobComplete' flag set to false. You can call GetQueryResults() to wait for the query to complete and read the results. The default value is 10000 milliseconds (10 seconds).",
- "format": "uint32"
- },
- "useQueryCache": {
- "type": "boolean",
- "description": "[Optional] Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever tables in the query are modified. The default value is true."
- }
- }
- },
- "QueryResponse": {
- "id": "QueryResponse",
- "type": "object",
- "properties": {
- "cacheHit": {
- "type": "boolean",
- "description": "Whether the query result was fetched from the query cache."
- },
- "jobComplete": {
- "type": "boolean",
- "description": "Whether the query has completed or not. If rows or totalRows are present, this will always be true. If this is false, totalRows will not be available."
- },
- "jobReference": {
- "$ref": "JobReference",
- "description": "Reference to the Job that was created to run the query. This field will be present even if the original request timed out, in which case GetQueryResults can be used to read the results once the query has completed. Since this API only returns the first page of results, subsequent pages can be fetched via the same mechanism (GetQueryResults)."
- },
- "kind": {
- "type": "string",
- "description": "The resource type.",
- "default": "bigquery#queryResponse"
- },
- "pageToken": {
- "type": "string",
- "description": "A token used for paging results."
- },
- "rows": {
- "type": "array",
- "description": "An object with as many results as can be contained within the maximum permitted reply size. To get any additional rows, you can call GetQueryResults and specify the jobReference returned above.",
- "items": {
- "$ref": "TableRow"
- }
- },
- "schema": {
- "$ref": "TableSchema",
- "description": "The schema of the results. Present only when the query completes successfully."
- },
- "totalBytesProcessed": {
- "type": "string",
- "description": "The total number of bytes processed for this query. If this query was a dry run, this is the number of bytes that would be processed if the query were run.",
- "format": "int64"
- },
- "totalRows": {
- "type": "string",
- "description": "The total number of rows in the complete query result set, which can be more than the number of rows in this single page of results.",
- "format": "uint64"
- }
- }
- },
- "Table": {
- "id": "Table",
- "type": "object",
- "properties": {
- "creationTime": {
- "type": "string",
- "description": "[Output-only] The time when this table was created, in milliseconds since the epoch.",
- "format": "int64"
- },
- "description": {
- "type": "string",
- "description": "[Optional] A user-friendly description of this table."
- },
- "etag": {
- "type": "string",
- "description": "[Output-only] A hash of this resource."
- },
- "expirationTime": {
- "type": "string",
- "description": "[Optional] The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.",
- "format": "int64"
- },
- "friendlyName": {
- "type": "string",
- "description": "[Optional] A descriptive name for this table."
- },
- "id": {
- "type": "string",
- "description": "[Output-only] An opaque ID uniquely identifying the table."
- },
- "kind": {
- "type": "string",
- "description": "[Output-only] The type of the resource.",
- "default": "bigquery#table"
- },
- "lastModifiedTime": {
- "type": "string",
- "description": "[Output-only] The time when this table was last modified, in milliseconds since the epoch.",
- "format": "uint64"
- },
- "numBytes": {
- "type": "string",
- "description": "[Output-only] The size of the table in bytes. This property is unavailable for tables that are actively receiving streaming inserts.",
- "format": "int64"
- },
- "numRows": {
- "type": "string",
- "description": "[Output-only] The number of rows of data in this table. This property is unavailable for tables that are actively receiving streaming inserts.",
- "format": "uint64"
- },
- "schema": {
- "$ref": "TableSchema",
- "description": "[Optional] Describes the schema of this table."
- },
- "selfLink": {
- "type": "string",
- "description": "[Output-only] A URL that can be used to access this resource again."
- },
- "tableReference": {
- "$ref": "TableReference",
- "description": "[Required] Reference describing the ID of this table."
- },
- "type": {
- "type": "string",
- "description": "[Output-only] Describes the table type. The following values are supported: TABLE: A normal BigQuery table. VIEW: A virtual table defined by a SQL query. The default value is TABLE."
- },
- "view": {
- "$ref": "ViewDefinition",
- "description": "[Optional] The view definition."
- }
- }
- },
- "TableCell": {
- "id": "TableCell",
- "type": "object",
- "description": "Represents a single cell in the result set. Users of the java client can detect whether their value result is null by calling 'com.google.api.client.util.Data.isNull(cell.getV())'.",
- "properties": {
- "v": {
- "type": "any"
- }
- }
- },
- "TableDataInsertAllRequest": {
- "id": "TableDataInsertAllRequest",
- "type": "object",
- "properties": {
- "ignoreUnknownValues": {
- "type": "boolean",
- "description": "[Optional] Accept rows that contain values that do not match the schema. The unknown values are ignored. Default is false, which treats unknown values as errors."
- },
- "kind": {
- "type": "string",
- "description": "The resource type of the response.",
- "default": "bigquery#tableDataInsertAllRequest"
- },
- "rows": {
- "type": "array",
- "description": "The rows to insert.",
- "items": {
- "type": "object",
- "properties": {
- "insertId": {
- "type": "string",
- "description": "[Optional] A unique ID for each row. BigQuery uses this property to detect duplicate insertion requests on a best-effort basis."
- },
- "json": {
- "$ref": "JsonObject",
- "description": "[Required] A JSON object that contains a row of data. The object's properties and values must match the destination table's schema."
- }
- }
- }
- },
- "skipInvalidRows": {
- "type": "boolean",
- "description": "[Optional] Insert all valid rows of a request, even if invalid rows exist. The default value is false, which causes the entire request to fail if any invalid rows exist."
- }
- }
- },
- "TableDataInsertAllResponse": {
- "id": "TableDataInsertAllResponse",
- "type": "object",
- "properties": {
- "insertErrors": {
- "type": "array",
- "description": "An array of errors for rows that were not inserted.",
- "items": {
- "type": "object",
- "properties": {
- "errors": {
- "type": "array",
- "description": "Error information for the row indicated by the index property.",
- "items": {
- "$ref": "ErrorProto"
- }
- },
- "index": {
- "type": "integer",
- "description": "The index of the row that error applies to.",
- "format": "uint32"
- }
- }
- }
- },
- "kind": {
- "type": "string",
- "description": "The resource type of the response.",
- "default": "bigquery#tableDataInsertAllResponse"
- }
- }
- },
- "TableDataList": {
- "id": "TableDataList",
- "type": "object",
- "properties": {
- "etag": {
- "type": "string",
- "description": "A hash of this page of results."
- },
- "kind": {
- "type": "string",
- "description": "The resource type of the response.",
- "default": "bigquery#tableDataList"
- },
- "pageToken": {
- "type": "string",
- "description": "A token used for paging results. Providing this token instead of the startIndex parameter can help you retrieve stable results when an underlying table is changing."
- },
- "rows": {
- "type": "array",
- "description": "Rows of results.",
- "items": {
- "$ref": "TableRow"
- }
- },
- "totalRows": {
- "type": "string",
- "description": "The total number of rows in the complete table.",
- "format": "int64"
- }
- }
- },
- "TableFieldSchema": {
- "id": "TableFieldSchema",
- "type": "object",
- "properties": {
- "description": {
- "type": "string",
- "description": "[Optional] The field description. The maximum length is 16K characters."
- },
- "fields": {
- "type": "array",
- "description": "[Optional] Describes the nested schema fields if the type property is set to RECORD.",
- "items": {
- "$ref": "TableFieldSchema"
- }
- },
- "mode": {
- "type": "string",
- "description": "[Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE."
- },
- "name": {
- "type": "string",
- "description": "[Required] The field name. The name must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a letter or underscore. The maximum length is 128 characters."
- },
- "type": {
- "type": "string",
- "description": "[Required] The field data type. Possible values include STRING, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates that the field contains a nested schema)."
- }
- }
- },
- "TableList": {
- "id": "TableList",
- "type": "object",
- "properties": {
- "etag": {
- "type": "string",
- "description": "A hash of this page of results."
- },
- "kind": {
- "type": "string",
- "description": "The type of list.",
- "default": "bigquery#tableList"
- },
- "nextPageToken": {
- "type": "string",
- "description": "A token to request the next page of results."
- },
- "tables": {
- "type": "array",
- "description": "Tables in the requested dataset.",
- "items": {
- "type": "object",
- "properties": {
- "friendlyName": {
- "type": "string",
- "description": "The user-friendly name for this table."
- },
- "id": {
- "type": "string",
- "description": "An opaque ID of the table"
- },
- "kind": {
- "type": "string",
- "description": "The resource type.",
- "default": "bigquery#table"
- },
- "tableReference": {
- "$ref": "TableReference",
- "description": "A reference uniquely identifying the table."
- },
- "type": {
- "type": "string",
- "description": "The type of table. Possible values are: TABLE, VIEW."
- }
- }
- }
- },
- "totalItems": {
- "type": "integer",
- "description": "The total number of tables in the dataset.",
- "format": "int32"
- }
- }
- },
- "TableReference": {
- "id": "TableReference",
- "type": "object",
- "properties": {
- "datasetId": {
- "type": "string",
- "description": "[Required] The ID of the dataset containing this table.",
- "annotations": {
- "required": [
- "bigquery.tables.update"
- ]
- }
- },
- "projectId": {
- "type": "string",
- "description": "[Required] The ID of the project containing this table.",
- "annotations": {
- "required": [
- "bigquery.tables.update"
- ]
- }
- },
- "tableId": {
- "type": "string",
- "description": "[Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.",
- "annotations": {
- "required": [
- "bigquery.tables.update"
- ]
- }
- }
- }
- },
- "TableRow": {
- "id": "TableRow",
- "type": "object",
- "description": "Represents a single row in the result set, consisting of one or more fields.",
- "properties": {
- "f": {
- "type": "array",
- "items": {
- "$ref": "TableCell"
- }
- }
- }
- },
- "TableSchema": {
- "id": "TableSchema",
- "type": "object",
- "properties": {
- "fields": {
- "type": "array",
- "description": "Describes the fields in a table.",
- "items": {
- "$ref": "TableFieldSchema"
- }
- }
- }
- },
- "ViewDefinition": {
- "id": "ViewDefinition",
- "type": "object",
- "properties": {
- "query": {
- "type": "string",
- "description": "[Required] A query that BigQuery executes when the view is referenced."
- }
- }
- }
- },
- "resources": {
- "datasets": {
- "methods": {
- "delete": {
- "id": "bigquery.datasets.delete",
- "path": "projects/{projectId}/datasets/{datasetId}",
- "httpMethod": "DELETE",
- "description": "Deletes the dataset specified by the datasetId value. Before you can delete a dataset, you must delete all its tables, either manually or by specifying deleteContents. Immediately after deletion, you can create another dataset with the same name.",
- "parameters": {
- "datasetId": {
- "type": "string",
- "description": "Dataset ID of dataset being deleted",
- "required": true,
- "location": "path"
- },
- "deleteContents": {
- "type": "boolean",
- "description": "If True, delete all the tables in the dataset. If False and the dataset contains tables, the request will fail. Default is False",
- "location": "query"
- },
- "projectId": {
- "type": "string",
- "description": "Project ID of the dataset being deleted",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId",
- "datasetId"
- ],
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- },
- "get": {
- "id": "bigquery.datasets.get",
- "path": "projects/{projectId}/datasets/{datasetId}",
- "httpMethod": "GET",
- "description": "Returns the dataset specified by datasetID.",
- "parameters": {
- "datasetId": {
- "type": "string",
- "description": "Dataset ID of the requested dataset",
- "required": true,
- "location": "path"
- },
- "projectId": {
- "type": "string",
- "description": "Project ID of the requested dataset",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId",
- "datasetId"
- ],
- "response": {
- "$ref": "Dataset"
- },
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- },
- "insert": {
- "id": "bigquery.datasets.insert",
- "path": "projects/{projectId}/datasets",
- "httpMethod": "POST",
- "description": "Creates a new empty dataset.",
- "parameters": {
- "projectId": {
- "type": "string",
- "description": "Project ID of the new dataset",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId"
- ],
- "request": {
- "$ref": "Dataset"
- },
- "response": {
- "$ref": "Dataset"
- },
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- },
- "list": {
- "id": "bigquery.datasets.list",
- "path": "projects/{projectId}/datasets",
- "httpMethod": "GET",
- "description": "Lists all datasets in the specified project to which you have been granted the READER dataset role.",
- "parameters": {
- "all": {
- "type": "boolean",
- "description": "Whether to list all datasets, including hidden ones",
- "location": "query"
- },
- "maxResults": {
- "type": "integer",
- "description": "The maximum number of results to return",
- "format": "uint32",
- "location": "query"
- },
- "pageToken": {
- "type": "string",
- "description": "Page token, returned by a previous call, to request the next page of results",
- "location": "query"
- },
- "projectId": {
- "type": "string",
- "description": "Project ID of the datasets to be listed",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId"
- ],
- "response": {
- "$ref": "DatasetList"
- },
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- },
- "patch": {
- "id": "bigquery.datasets.patch",
- "path": "projects/{projectId}/datasets/{datasetId}",
- "httpMethod": "PATCH",
- "description": "Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource. This method supports patch semantics.",
- "parameters": {
- "datasetId": {
- "type": "string",
- "description": "Dataset ID of the dataset being updated",
- "required": true,
- "location": "path"
- },
- "projectId": {
- "type": "string",
- "description": "Project ID of the dataset being updated",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId",
- "datasetId"
- ],
- "request": {
- "$ref": "Dataset"
- },
- "response": {
- "$ref": "Dataset"
- },
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- },
- "update": {
- "id": "bigquery.datasets.update",
- "path": "projects/{projectId}/datasets/{datasetId}",
- "httpMethod": "PUT",
- "description": "Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource.",
- "parameters": {
- "datasetId": {
- "type": "string",
- "description": "Dataset ID of the dataset being updated",
- "required": true,
- "location": "path"
- },
- "projectId": {
- "type": "string",
- "description": "Project ID of the dataset being updated",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId",
- "datasetId"
- ],
- "request": {
- "$ref": "Dataset"
- },
- "response": {
- "$ref": "Dataset"
- },
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- }
- }
- },
- "jobs": {
- "methods": {
- "get": {
- "id": "bigquery.jobs.get",
- "path": "projects/{projectId}/jobs/{jobId}",
- "httpMethod": "GET",
- "description": "Returns information about a specific job. Job information is available for a six month period after creation. Requires that you're the person who ran the job, or have the Is Owner project role.",
- "parameters": {
- "jobId": {
- "type": "string",
- "description": "Job ID of the requested job",
- "required": true,
- "location": "path"
- },
- "projectId": {
- "type": "string",
- "description": "Project ID of the requested job",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId",
- "jobId"
- ],
- "response": {
- "$ref": "Job"
- },
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- },
- "getQueryResults": {
- "id": "bigquery.jobs.getQueryResults",
- "path": "projects/{projectId}/queries/{jobId}",
- "httpMethod": "GET",
- "description": "Retrieves the results of a query job.",
- "parameters": {
- "jobId": {
- "type": "string",
- "description": "Job ID of the query job",
- "required": true,
- "location": "path"
- },
- "maxResults": {
- "type": "integer",
- "description": "Maximum number of results to read",
- "format": "uint32",
- "location": "query"
- },
- "pageToken": {
- "type": "string",
- "description": "Page token, returned by a previous call, to request the next page of results",
- "location": "query"
- },
- "projectId": {
- "type": "string",
- "description": "Project ID of the query job",
- "required": true,
- "location": "path"
- },
- "startIndex": {
- "type": "string",
- "description": "Zero-based index of the starting row",
- "format": "uint64",
- "location": "query"
- },
- "timeoutMs": {
- "type": "integer",
- "description": "How long to wait for the query to complete, in milliseconds, before returning. Default is to return immediately. If the timeout passes before the job completes, the request will fail with a TIMEOUT error",
- "format": "uint32",
- "location": "query"
- }
- },
- "parameterOrder": [
- "projectId",
- "jobId"
- ],
- "response": {
- "$ref": "GetQueryResultsResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- },
- "insert": {
- "id": "bigquery.jobs.insert",
- "path": "projects/{projectId}/jobs",
- "httpMethod": "POST",
- "description": "Starts a new asynchronous job. Requires the Can View project role.",
- "parameters": {
- "projectId": {
- "type": "string",
- "description": "Project ID of the project that will be billed for the job",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId"
- ],
- "request": {
- "$ref": "Job"
- },
- "response": {
- "$ref": "Job"
- },
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ],
- "supportsMediaUpload": true,
- "mediaUpload": {
- "accept": [
- "*/*"
- ],
- "protocols": {
- "simple": {
- "multipart": true,
- "path": "/upload/bigquery/v2/projects/{projectId}/jobs"
- },
- "resumable": {
- "multipart": true,
- "path": "/resumable/upload/bigquery/v2/projects/{projectId}/jobs"
- }
- }
- }
- },
- "list": {
- "id": "bigquery.jobs.list",
- "path": "projects/{projectId}/jobs",
- "httpMethod": "GET",
- "description": "Lists all jobs that you started in the specified project. The job list returns in reverse chronological order of when the jobs were created, starting with the most recent job created. Requires the Can View project role, or the Is Owner project role if you set the allUsers property.",
- "parameters": {
- "allUsers": {
- "type": "boolean",
- "description": "Whether to display jobs owned by all users in the project. Default false",
- "location": "query"
- },
- "maxResults": {
- "type": "integer",
- "description": "Maximum number of results to return",
- "format": "uint32",
- "location": "query"
- },
- "pageToken": {
- "type": "string",
- "description": "Page token, returned by a previous call, to request the next page of results",
- "location": "query"
- },
- "projectId": {
- "type": "string",
- "description": "Project ID of the jobs to list",
- "required": true,
- "location": "path"
- },
- "projection": {
- "type": "string",
- "description": "Restrict information returned to a set of selected fields",
- "enum": [
- "full",
- "minimal"
- ],
- "enumDescriptions": [
- "Includes all job data",
- "Does not include the job configuration"
- ],
- "location": "query"
- },
- "stateFilter": {
- "type": "string",
- "description": "Filter for job state",
- "enum": [
- "done",
- "pending",
- "running"
- ],
- "enumDescriptions": [
- "Finished jobs",
- "Pending jobs",
- "Running jobs"
- ],
- "repeated": true,
- "location": "query"
- }
- },
- "parameterOrder": [
- "projectId"
- ],
- "response": {
- "$ref": "JobList"
- },
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- },
- "query": {
- "id": "bigquery.jobs.query",
- "path": "projects/{projectId}/queries",
- "httpMethod": "POST",
- "description": "Runs a BigQuery SQL query synchronously and returns query results if the query completes within a specified timeout.",
- "parameters": {
- "projectId": {
- "type": "string",
- "description": "Project ID of the project billed for the query",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId"
- ],
- "request": {
- "$ref": "QueryRequest"
- },
- "response": {
- "$ref": "QueryResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- }
- }
- },
- "projects": {
- "methods": {
- "list": {
- "id": "bigquery.projects.list",
- "path": "projects",
- "httpMethod": "GET",
- "description": "Lists all projects to which you have been granted any project role.",
- "parameters": {
- "maxResults": {
- "type": "integer",
- "description": "Maximum number of results to return",
- "format": "uint32",
- "location": "query"
- },
- "pageToken": {
- "type": "string",
- "description": "Page token, returned by a previous call, to request the next page of results",
- "location": "query"
- }
- },
- "response": {
- "$ref": "ProjectList"
- },
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- }
- }
- },
- "tabledata": {
- "methods": {
- "insertAll": {
- "id": "bigquery.tabledata.insertAll",
- "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll",
- "httpMethod": "POST",
- "description": "Streams data into BigQuery one record at a time without needing to run a load job. Requires the WRITER dataset role.",
- "parameters": {
- "datasetId": {
- "type": "string",
- "description": "Dataset ID of the destination table.",
- "required": true,
- "location": "path"
- },
- "projectId": {
- "type": "string",
- "description": "Project ID of the destination table.",
- "required": true,
- "location": "path"
- },
- "tableId": {
- "type": "string",
- "description": "Table ID of the destination table.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId",
- "datasetId",
- "tableId"
- ],
- "request": {
- "$ref": "TableDataInsertAllRequest"
- },
- "response": {
- "$ref": "TableDataInsertAllResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/bigquery.insertdata",
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- },
- "list": {
- "id": "bigquery.tabledata.list",
- "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/data",
- "httpMethod": "GET",
- "description": "Retrieves table data from a specified set of rows. Requires the READER dataset role.",
- "parameters": {
- "datasetId": {
- "type": "string",
- "description": "Dataset ID of the table to read",
- "required": true,
- "location": "path"
- },
- "maxResults": {
- "type": "integer",
- "description": "Maximum number of results to return",
- "format": "uint32",
- "location": "query"
- },
- "pageToken": {
- "type": "string",
- "description": "Page token, returned by a previous call, identifying the result set",
- "location": "query"
- },
- "projectId": {
- "type": "string",
- "description": "Project ID of the table to read",
- "required": true,
- "location": "path"
- },
- "startIndex": {
- "type": "string",
- "description": "Zero-based index of the starting row to read",
- "format": "uint64",
- "location": "query"
- },
- "tableId": {
- "type": "string",
- "description": "Table ID of the table to read",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId",
- "datasetId",
- "tableId"
- ],
- "response": {
- "$ref": "TableDataList"
- },
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- }
- }
- },
- "tables": {
- "methods": {
- "delete": {
- "id": "bigquery.tables.delete",
- "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
- "httpMethod": "DELETE",
- "description": "Deletes the table specified by tableId from the dataset. If the table contains data, all the data will be deleted.",
- "parameters": {
- "datasetId": {
- "type": "string",
- "description": "Dataset ID of the table to delete",
- "required": true,
- "location": "path"
- },
- "projectId": {
- "type": "string",
- "description": "Project ID of the table to delete",
- "required": true,
- "location": "path"
- },
- "tableId": {
- "type": "string",
- "description": "Table ID of the table to delete",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId",
- "datasetId",
- "tableId"
- ],
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- },
- "get": {
- "id": "bigquery.tables.get",
- "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
- "httpMethod": "GET",
- "description": "Gets the specified table resource by table ID. This method does not return the data in the table, it only returns the table resource, which describes the structure of this table.",
- "parameters": {
- "datasetId": {
- "type": "string",
- "description": "Dataset ID of the requested table",
- "required": true,
- "location": "path"
- },
- "projectId": {
- "type": "string",
- "description": "Project ID of the requested table",
- "required": true,
- "location": "path"
- },
- "tableId": {
- "type": "string",
- "description": "Table ID of the requested table",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId",
- "datasetId",
- "tableId"
- ],
- "response": {
- "$ref": "Table"
- },
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- },
- "insert": {
- "id": "bigquery.tables.insert",
- "path": "projects/{projectId}/datasets/{datasetId}/tables",
- "httpMethod": "POST",
- "description": "Creates a new, empty table in the dataset.",
- "parameters": {
- "datasetId": {
- "type": "string",
- "description": "Dataset ID of the new table",
- "required": true,
- "location": "path"
- },
- "projectId": {
- "type": "string",
- "description": "Project ID of the new table",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId",
- "datasetId"
- ],
- "request": {
- "$ref": "Table"
- },
- "response": {
- "$ref": "Table"
- },
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- },
- "list": {
- "id": "bigquery.tables.list",
- "path": "projects/{projectId}/datasets/{datasetId}/tables",
- "httpMethod": "GET",
- "description": "Lists all tables in the specified dataset. Requires the READER dataset role.",
- "parameters": {
- "datasetId": {
- "type": "string",
- "description": "Dataset ID of the tables to list",
- "required": true,
- "location": "path"
- },
- "maxResults": {
- "type": "integer",
- "description": "Maximum number of results to return",
- "format": "uint32",
- "location": "query"
- },
- "pageToken": {
- "type": "string",
- "description": "Page token, returned by a previous call, to request the next page of results",
- "location": "query"
- },
- "projectId": {
- "type": "string",
- "description": "Project ID of the tables to list",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId",
- "datasetId"
- ],
- "response": {
- "$ref": "TableList"
- },
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- },
- "patch": {
- "id": "bigquery.tables.patch",
- "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
- "httpMethod": "PATCH",
- "description": "Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource. This method supports patch semantics.",
- "parameters": {
- "datasetId": {
- "type": "string",
- "description": "Dataset ID of the table to update",
- "required": true,
- "location": "path"
- },
- "projectId": {
- "type": "string",
- "description": "Project ID of the table to update",
- "required": true,
- "location": "path"
- },
- "tableId": {
- "type": "string",
- "description": "Table ID of the table to update",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId",
- "datasetId",
- "tableId"
- ],
- "request": {
- "$ref": "Table"
- },
- "response": {
- "$ref": "Table"
- },
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- },
- "update": {
- "id": "bigquery.tables.update",
- "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
- "httpMethod": "PUT",
- "description": "Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource.",
- "parameters": {
- "datasetId": {
- "type": "string",
- "description": "Dataset ID of the table to update",
- "required": true,
- "location": "path"
- },
- "projectId": {
- "type": "string",
- "description": "Project ID of the table to update",
- "required": true,
- "location": "path"
- },
- "tableId": {
- "type": "string",
- "description": "Table ID of the table to update",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId",
- "datasetId",
- "tableId"
- ],
- "request": {
- "$ref": "Table"
- },
- "response": {
- "$ref": "Table"
- },
- "scopes": [
- "https://www.googleapis.com/auth/bigquery",
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- }
- }
- }
- }
-}
diff --git a/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go b/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go
deleted file mode 100644
index 4c6873d..0000000
--- a/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go
+++ /dev/null
@@ -1,3531 +0,0 @@
-// Package bigquery provides access to the BigQuery API.
-//
-// See https://cloud.google.com/bigquery/
-//
-// Usage example:
-//
-// import "google.golang.org/api/bigquery/v2"
-// ...
-// bigqueryService, err := bigquery.New(oauthHttpClient)
-package bigquery
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "golang.org/x/net/context"
- "google.golang.org/api/googleapi"
- "io"
- "net/http"
- "net/url"
- "strconv"
- "strings"
-)
-
-// Always reference these packages, just in case the auto-generated code
-// below doesn't.
-var _ = bytes.NewBuffer
-var _ = strconv.Itoa
-var _ = fmt.Sprintf
-var _ = json.NewDecoder
-var _ = io.Copy
-var _ = url.Parse
-var _ = googleapi.Version
-var _ = errors.New
-var _ = strings.Replace
-var _ = context.Background
-
-const apiId = "bigquery:v2"
-const apiName = "bigquery"
-const apiVersion = "v2"
-const basePath = "https://www.googleapis.com/bigquery/v2/"
-
-// OAuth2 scopes used by this API.
-const (
- // View and manage your data in Google BigQuery
- BigqueryScope = "https://www.googleapis.com/auth/bigquery"
-
- // Insert data into Google BigQuery
- BigqueryInsertdataScope = "https://www.googleapis.com/auth/bigquery.insertdata"
-
- // View and manage your data across Google Cloud Platform services
- CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
-
- // Manage your data and permissions in Google Cloud Storage
- DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control"
-
- // View your data in Google Cloud Storage
- DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only"
-
- // Manage your data in Google Cloud Storage
- DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write"
-)
-
-func New(client *http.Client) (*Service, error) {
- if client == nil {
- return nil, errors.New("client is nil")
- }
- s := &Service{client: client, BasePath: basePath}
- s.Datasets = NewDatasetsService(s)
- s.Jobs = NewJobsService(s)
- s.Projects = NewProjectsService(s)
- s.Tabledata = NewTabledataService(s)
- s.Tables = NewTablesService(s)
- return s, nil
-}
-
-type Service struct {
- client *http.Client
- BasePath string // API endpoint base URL
- UserAgent string // optional additional User-Agent fragment
-
- Datasets *DatasetsService
-
- Jobs *JobsService
-
- Projects *ProjectsService
-
- Tabledata *TabledataService
-
- Tables *TablesService
-}
-
-func (s *Service) userAgent() string {
- if s.UserAgent == "" {
- return googleapi.UserAgent
- }
- return googleapi.UserAgent + " " + s.UserAgent
-}
-
-func NewDatasetsService(s *Service) *DatasetsService {
- rs := &DatasetsService{s: s}
- return rs
-}
-
-type DatasetsService struct {
- s *Service
-}
-
-func NewJobsService(s *Service) *JobsService {
- rs := &JobsService{s: s}
- return rs
-}
-
-type JobsService struct {
- s *Service
-}
-
-func NewProjectsService(s *Service) *ProjectsService {
- rs := &ProjectsService{s: s}
- return rs
-}
-
-type ProjectsService struct {
- s *Service
-}
-
-func NewTabledataService(s *Service) *TabledataService {
- rs := &TabledataService{s: s}
- return rs
-}
-
-type TabledataService struct {
- s *Service
-}
-
-func NewTablesService(s *Service) *TablesService {
- rs := &TablesService{s: s}
- return rs
-}
-
-type TablesService struct {
- s *Service
-}
-
-type CsvOptions struct {
- // AllowJaggedRows: [Optional] Indicates if BigQuery should accept rows
- // that are missing trailing optional columns. If true, BigQuery treats
- // missing trailing columns as null values. If false, records with
- // missing trailing columns are treated as bad records, and if there are
- // too many bad records, an invalid error is returned in the job result.
- // The default value is false.
- AllowJaggedRows bool `json:"allowJaggedRows,omitempty"`
-
- // AllowQuotedNewlines: [Optional] Indicates if BigQuery should allow
- // quoted data sections that contain newline characters in a CSV file.
- // The default value is false.
- AllowQuotedNewlines bool `json:"allowQuotedNewlines,omitempty"`
-
- // Encoding: [Optional] The character encoding of the data. The
- // supported values are UTF-8 or ISO-8859-1. The default value is UTF-8.
- // BigQuery decodes the data after the raw, binary data has been split
- // using the values of the quote and fieldDelimiter properties.
- Encoding string `json:"encoding,omitempty"`
-
- // FieldDelimiter: [Optional] The separator for fields in a CSV file.
- // BigQuery converts the string to ISO-8859-1 encoding, and then uses
- // the first byte of the encoded string to split the data in its raw,
- // binary state. BigQuery also supports the escape sequence "\t" to
- // specify a tab separator. The default value is a comma (',').
- FieldDelimiter string `json:"fieldDelimiter,omitempty"`
-
- // Quote: [Optional] The value that is used to quote data sections in a
- // CSV file. BigQuery converts the string to ISO-8859-1 encoding, and
- // then uses the first byte of the encoded string to split the data in
- // its raw, binary state. The default value is a double-quote ('"'). If
- // your data does not contain quoted sections, set the property value to
- // an empty string. If your data contains quoted newline characters, you
- // must also set the allowQuotedNewlines property to true.
- Quote string `json:"quote,omitempty"`
-
- // SkipLeadingRows: [Optional] The number of rows at the top of a CSV
- // file that BigQuery will skip when reading the data. The default value
- // is 0. This property is useful if you have header rows in the file
- // that should be skipped.
- SkipLeadingRows int64 `json:"skipLeadingRows,omitempty"`
-}
-
-type Dataset struct {
- // Access: [Optional] An array of objects that define dataset access for
- // one or more entities. You can set this property when inserting or
- // updating a dataset in order to control who is allowed to access the
- // data. If unspecified at dataset creation time, BigQuery adds default
- // dataset access for the following entities: access.specialGroup:
- // projectReaders; access.role: READER; access.specialGroup:
- // projectWriters; access.role: WRITER; access.specialGroup:
- // projectOwners; access.role: OWNER; access.userByEmail: [dataset
- // creator email]; access.role: OWNER;
- Access []*DatasetAccess `json:"access,omitempty"`
-
- // CreationTime: [Output-only] The time when this dataset was created,
- // in milliseconds since the epoch.
- CreationTime int64 `json:"creationTime,omitempty,string"`
-
- // DatasetReference: [Required] A reference that identifies the dataset.
- DatasetReference *DatasetReference `json:"datasetReference,omitempty"`
-
- // DefaultTableExpirationMs: [Experimental] The default lifetime of all
- // tables in the dataset, in milliseconds. The minimum value is 3600000
- // milliseconds (one hour). Once this property is set, all newly-created
- // tables in the dataset will have an expirationTime property set to the
- // creation time plus the value in this property, and changing the value
- // will only affect new tables, not existing ones. When the
- // expirationTime for a given table is reached, that table will be
- // deleted automatically. If a table's expirationTime is modified or
- // removed before the table expires, or if you provide an explicit
- // expirationTime when creating a table, that value takes precedence
- // over the default expiration time indicated by this property.
- DefaultTableExpirationMs int64 `json:"defaultTableExpirationMs,omitempty,string"`
-
- // Description: [Optional] A user-friendly description of the dataset.
- Description string `json:"description,omitempty"`
-
- // Etag: [Output-only] A hash of the resource.
- Etag string `json:"etag,omitempty"`
-
- // FriendlyName: [Optional] A descriptive name for the dataset.
- FriendlyName string `json:"friendlyName,omitempty"`
-
- // Id: [Output-only] The fully-qualified unique name of the dataset in
- // the format projectId:datasetId. The dataset name without the project
- // name is given in the datasetId field. When creating a new dataset,
- // leave this field blank, and instead specify the datasetId field.
- Id string `json:"id,omitempty"`
-
- // Kind: [Output-only] The resource type.
- Kind string `json:"kind,omitempty"`
-
- // LastModifiedTime: [Output-only] The date when this dataset or any of
- // its tables was last modified, in milliseconds since the epoch.
- LastModifiedTime int64 `json:"lastModifiedTime,omitempty,string"`
-
- // Location: [Experimental] The location where the data resides. If not
- // present, the data will be stored in the US.
- Location string `json:"location,omitempty"`
-
- // SelfLink: [Output-only] A URL that can be used to access the resource
- // again. You can use this URL in Get or Update requests to the
- // resource.
- SelfLink string `json:"selfLink,omitempty"`
-}
-
-type DatasetAccess struct {
- // Domain: [Pick one] A domain to grant access to. Any users signed in
- // with the domain specified will be granted the specified access.
- // Example: "example.com".
- Domain string `json:"domain,omitempty"`
-
- // GroupByEmail: [Pick one] An email address of a Google Group to grant
- // access to.
- GroupByEmail string `json:"groupByEmail,omitempty"`
-
- // Role: [Required] Describes the rights granted to the user specified
- // by the other member of the access object. The following string values
- // are supported: READER, WRITER, OWNER.
- Role string `json:"role,omitempty"`
-
- // SpecialGroup: [Pick one] A special group to grant access to. Possible
- // values include: projectOwners: Owners of the enclosing project.
- // projectReaders: Readers of the enclosing project. projectWriters:
- // Writers of the enclosing project. allAuthenticatedUsers: All
- // authenticated BigQuery users.
- SpecialGroup string `json:"specialGroup,omitempty"`
-
- // UserByEmail: [Pick one] An email address of a user to grant access
- // to. For example: fred@example.com.
- UserByEmail string `json:"userByEmail,omitempty"`
-
- // View: [Pick one] A view from a different dataset to grant access to.
- // Queries executed against that view will have read access to tables in
- // this dataset. The role field is not required when this field is set.
- // If that view is updated by any user, access to the view needs to be
- // granted again via an update operation.
- View *TableReference `json:"view,omitempty"`
-}
-
-type DatasetList struct {
- // Datasets: An array of the dataset resources in the project. Each
- // resource contains basic information. For full information about a
- // particular dataset resource, use the Datasets: get method. This
- // property is omitted when there are no datasets in the project.
- Datasets []*DatasetListDatasets `json:"datasets,omitempty"`
-
- // Etag: A hash value of the results page. You can use this property to
- // determine if the page has changed since the last request.
- Etag string `json:"etag,omitempty"`
-
- // Kind: The list type. This property always returns the value
- // "bigquery#datasetList".
- Kind string `json:"kind,omitempty"`
-
- // NextPageToken: A token that can be used to request the next results
- // page. This property is omitted on the final results page.
- NextPageToken string `json:"nextPageToken,omitempty"`
-}
-
-type DatasetListDatasets struct {
- // DatasetReference: The dataset reference. Use this property to access
- // specific parts of the dataset's ID, such as project ID or dataset ID.
- DatasetReference *DatasetReference `json:"datasetReference,omitempty"`
-
- // FriendlyName: A descriptive name for the dataset, if one exists.
- FriendlyName string `json:"friendlyName,omitempty"`
-
- // Id: The fully-qualified, unique, opaque ID of the dataset.
- Id string `json:"id,omitempty"`
-
- // Kind: The resource type. This property always returns the value
- // "bigquery#dataset".
- Kind string `json:"kind,omitempty"`
-}
-
-type DatasetReference struct {
- // DatasetId: [Required] A unique ID for this dataset, without the
- // project name. The ID must contain only letters (a-z, A-Z), numbers
- // (0-9), or underscores (_). The maximum length is 1,024 characters.
- DatasetId string `json:"datasetId,omitempty"`
-
- // ProjectId: [Optional] The ID of the project containing this dataset.
- ProjectId string `json:"projectId,omitempty"`
-}
-
-type ErrorProto struct {
- // DebugInfo: Debugging information. This property is internal to Google
- // and should not be used.
- DebugInfo string `json:"debugInfo,omitempty"`
-
- // Location: Specifies where the error occurred, if present.
- Location string `json:"location,omitempty"`
-
- // Message: A human-readable description of the error.
- Message string `json:"message,omitempty"`
-
- // Reason: A short error code that summarizes the error.
- Reason string `json:"reason,omitempty"`
-}
-
-type ExternalDataConfiguration struct {
- // Compression: [Optional] The compression type of the data source.
- // Possible values include GZIP and NONE. The default value is NONE.
- Compression string `json:"compression,omitempty"`
-
- // CsvOptions: Additional properties to set if sourceFormat is set to
- // CSV.
- CsvOptions *CsvOptions `json:"csvOptions,omitempty"`
-
- // IgnoreUnknownValues: [Optional] Indicates if BigQuery should allow
- // extra values that are not represented in the table schema. If true,
- // the extra values are ignored. If false, records with extra columns
- // are treated as bad records, and if there are too many bad records, an
- // invalid error is returned in the job result. The default value is
- // false. The sourceFormat property determines what BigQuery treats as
- // an extra value: CSV: Trailing columns
- IgnoreUnknownValues bool `json:"ignoreUnknownValues,omitempty"`
-
- // MaxBadRecords: [Optional] The maximum number of bad records that
- // BigQuery can ignore when reading data. If the number of bad records
- // exceeds this value, an invalid error is returned in the job result.
- // The default value is 0, which requires that all records are valid.
- MaxBadRecords int64 `json:"maxBadRecords,omitempty"`
-
- // Schema: [Required] The schema for the data.
- Schema *TableSchema `json:"schema,omitempty"`
-
- // SourceFormat: [Optional] The data format. External data sources must
- // be in CSV format. The default value is CSV.
- SourceFormat string `json:"sourceFormat,omitempty"`
-
- // SourceUris: [Required] The fully-qualified URIs that point to your
- // data in Google Cloud Storage. Each URI can contain one '*' wildcard
- // character and it must come after the 'bucket' name. CSV limits
- // related to load jobs apply to external data sources, plus an
- // additional limit of 10 GB maximum size across all URIs.
- SourceUris []string `json:"sourceUris,omitempty"`
-}
-
-type GetQueryResultsResponse struct {
- // CacheHit: Whether the query result was fetched from the query cache.
- CacheHit bool `json:"cacheHit,omitempty"`
-
- // Etag: A hash of this response.
- Etag string `json:"etag,omitempty"`
-
- // JobComplete: Whether the query has completed or not. If rows or
- // totalRows are present, this will always be true. If this is false,
- // totalRows will not be available.
- JobComplete bool `json:"jobComplete,omitempty"`
-
- // JobReference: Reference to the BigQuery Job that was created to run
- // the query. This field will be present even if the original request
- // timed out, in which case GetQueryResults can be used to read the
- // results once the query has completed. Since this API only returns the
- // first page of results, subsequent pages can be fetched via the same
- // mechanism (GetQueryResults).
- JobReference *JobReference `json:"jobReference,omitempty"`
-
- // Kind: The resource type of the response.
- Kind string `json:"kind,omitempty"`
-
- // PageToken: A token used for paging results.
- PageToken string `json:"pageToken,omitempty"`
-
- // Rows: An object with as many results as can be contained within the
- // maximum permitted reply size. To get any additional rows, you can
- // call GetQueryResults and specify the jobReference returned above.
- // Present only when the query completes successfully.
- Rows []*TableRow `json:"rows,omitempty"`
-
- // Schema: The schema of the results. Present only when the query
- // completes successfully.
- Schema *TableSchema `json:"schema,omitempty"`
-
- // TotalBytesProcessed: The total number of bytes processed for this
- // query.
- TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"`
-
- // TotalRows: The total number of rows in the complete query result set,
- // which can be more than the number of rows in this single page of
- // results. Present only when the query completes successfully.
- TotalRows uint64 `json:"totalRows,omitempty,string"`
-}
-
-type Job struct {
- // Configuration: [Required] Describes the job configuration.
- Configuration *JobConfiguration `json:"configuration,omitempty"`
-
- // Etag: [Output-only] A hash of this resource.
- Etag string `json:"etag,omitempty"`
-
- // Id: [Output-only] Opaque ID field of the job
- Id string `json:"id,omitempty"`
-
- // JobReference: [Optional] Reference describing the unique-per-user
- // name of the job.
- JobReference *JobReference `json:"jobReference,omitempty"`
-
- // Kind: [Output-only] The type of the resource.
- Kind string `json:"kind,omitempty"`
-
- // SelfLink: [Output-only] A URL that can be used to access this
- // resource again.
- SelfLink string `json:"selfLink,omitempty"`
-
- // Statistics: [Output-only] Information about the job, including
- // starting time and ending time of the job.
- Statistics *JobStatistics `json:"statistics,omitempty"`
-
- // Status: [Output-only] The status of this job. Examine this value when
- // polling an asynchronous job to see if the job is complete.
- Status *JobStatus `json:"status,omitempty"`
-
- // UserEmail: [Output-only] Email address of the user who ran the job.
- UserEmail string `json:"user_email,omitempty"`
-}
-
-type JobConfiguration struct {
- // Copy: [Pick one] Copies a table.
- Copy *JobConfigurationTableCopy `json:"copy,omitempty"`
-
- // DryRun: [Optional] If set, don't actually run this job. A valid query
- // will return a mostly empty response with some processing statistics,
- // while an invalid query will return the same error it would if it
- // wasn't a dry run. Behavior of non-query jobs is undefined.
- DryRun bool `json:"dryRun,omitempty"`
-
- // Extract: [Pick one] Configures an extract job.
- Extract *JobConfigurationExtract `json:"extract,omitempty"`
-
- // Link: [Pick one] Configures a link job.
- Link *JobConfigurationLink `json:"link,omitempty"`
-
- // Load: [Pick one] Configures a load job.
- Load *JobConfigurationLoad `json:"load,omitempty"`
-
- // Query: [Pick one] Configures a query job.
- Query *JobConfigurationQuery `json:"query,omitempty"`
-}
-
-type JobConfigurationExtract struct {
- // Compression: [Optional] The compression type to use for exported
- // files. Possible values include GZIP and NONE. The default value is
- // NONE.
- Compression string `json:"compression,omitempty"`
-
- // DestinationFormat: [Optional] The exported file format. Possible
- // values include CSV, NEWLINE_DELIMITED_JSON and AVRO. The default
- // value is CSV. Tables with nested or repeated fields cannot be
- // exported as CSV.
- DestinationFormat string `json:"destinationFormat,omitempty"`
-
- // DestinationUri: [Pick one] DEPRECATED: Use destinationUris instead,
- // passing only one URI as necessary. The fully-qualified Google Cloud
- // Storage URI where the extracted table should be written.
- DestinationUri string `json:"destinationUri,omitempty"`
-
- // DestinationUris: [Pick one] A list of fully-qualified Google Cloud
- // Storage URIs where the extracted table should be written.
- DestinationUris []string `json:"destinationUris,omitempty"`
-
- // FieldDelimiter: [Optional] Delimiter to use between fields in the
- // exported data. Default is ','
- FieldDelimiter string `json:"fieldDelimiter,omitempty"`
-
- // PrintHeader: [Optional] Whether to print out a header row in the
- // results. Default is true.
- PrintHeader bool `json:"printHeader,omitempty"`
-
- // SourceTable: [Required] A reference to the table being exported.
- SourceTable *TableReference `json:"sourceTable,omitempty"`
-}
-
-type JobConfigurationLink struct {
- // CreateDisposition: [Optional] Specifies whether the job is allowed to
- // create new tables. The following values are supported:
- // CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the
- // table. CREATE_NEVER: The table must already exist. If it does not, a
- // 'notFound' error is returned in the job result. The default value is
- // CREATE_IF_NEEDED. Creation, truncation and append actions occur as
- // one atomic update upon job completion.
- CreateDisposition string `json:"createDisposition,omitempty"`
-
- // DestinationTable: [Required] The destination table of the link job.
- DestinationTable *TableReference `json:"destinationTable,omitempty"`
-
- // SourceUri: [Required] URI of source table to link.
- SourceUri []string `json:"sourceUri,omitempty"`
-
- // WriteDisposition: [Optional] Specifies the action that occurs if the
- // destination table already exists. The following values are supported:
- // WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
- // table data. WRITE_APPEND: If the table already exists, BigQuery
- // appends the data to the table. WRITE_EMPTY: If the table already
- // exists and contains data, a 'duplicate' error is returned in the job
- // result. The default value is WRITE_EMPTY. Each action is atomic and
- // only occurs if BigQuery is able to complete the job successfully.
- // Creation, truncation and append actions occur as one atomic update
- // upon job completion.
- WriteDisposition string `json:"writeDisposition,omitempty"`
-}
-
-type JobConfigurationLoad struct {
- // AllowJaggedRows: [Optional] Accept rows that are missing trailing
- // optional columns. The missing values are treated as nulls. If false,
- // records with missing trailing columns are treated as bad records, and
- // if there are too many bad records, an invalid error is returned in
- // the job result. The default value is false. Only applicable to CSV,
- // ignored for other formats.
- AllowJaggedRows bool `json:"allowJaggedRows,omitempty"`
-
- // AllowQuotedNewlines: Indicates if BigQuery should allow quoted data
- // sections that contain newline characters in a CSV file. The default
- // value is false.
- AllowQuotedNewlines bool `json:"allowQuotedNewlines,omitempty"`
-
- // CreateDisposition: [Optional] Specifies whether the job is allowed to
- // create new tables. The following values are supported:
- // CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the
- // table. CREATE_NEVER: The table must already exist. If it does not, a
- // 'notFound' error is returned in the job result. The default value is
- // CREATE_IF_NEEDED. Creation, truncation and append actions occur as
- // one atomic update upon job completion.
- CreateDisposition string `json:"createDisposition,omitempty"`
-
- // DestinationTable: [Required] The destination table to load the data
- // into.
- DestinationTable *TableReference `json:"destinationTable,omitempty"`
-
- // Encoding: [Optional] The character encoding of the data. The
- // supported values are UTF-8 or ISO-8859-1. The default value is UTF-8.
- // BigQuery decodes the data after the raw, binary data has been split
- // using the values of the quote and fieldDelimiter properties.
- Encoding string `json:"encoding,omitempty"`
-
- // FieldDelimiter: [Optional] The separator for fields in a CSV file.
- // BigQuery converts the string to ISO-8859-1 encoding, and then uses
- // the first byte of the encoded string to split the data in its raw,
- // binary state. BigQuery also supports the escape sequence "\t" to
- // specify a tab separator. The default value is a comma (',').
- FieldDelimiter string `json:"fieldDelimiter,omitempty"`
-
- // IgnoreUnknownValues: [Optional] Indicates if BigQuery should allow
- // extra values that are not represented in the table schema. If true,
- // the extra values are ignored. If false, records with extra columns
- // are treated as bad records, and if there are too many bad records, an
- // invalid error is returned in the job result. The default value is
- // false. The sourceFormat property determines what BigQuery treats as
- // an extra value: CSV: Trailing columns JSON: Named values that don't
- // match any column names
- IgnoreUnknownValues bool `json:"ignoreUnknownValues,omitempty"`
-
- // MaxBadRecords: [Optional] The maximum number of bad records that
- // BigQuery can ignore when running the job. If the number of bad
- // records exceeds this value, an invalid error is returned in the job
- // result. The default value is 0, which requires that all records are
- // valid.
- MaxBadRecords int64 `json:"maxBadRecords,omitempty"`
-
- // ProjectionFields: [Experimental] If sourceFormat is set to
- // "DATASTORE_BACKUP", indicates which entity properties to load into
- // BigQuery from a Cloud Datastore backup. Property names are case
- // sensitive and must be top-level properties. If no properties are
- // specified, BigQuery loads all properties. If any named property isn't
- // found in the Cloud Datastore backup, an invalid error is returned in
- // the job result.
- ProjectionFields []string `json:"projectionFields,omitempty"`
-
- // Quote: [Optional] The value that is used to quote data sections in a
- // CSV file. BigQuery converts the string to ISO-8859-1 encoding, and
- // then uses the first byte of the encoded string to split the data in
- // its raw, binary state. The default value is a double-quote ('"'). If
- // your data does not contain quoted sections, set the property value to
- // an empty string. If your data contains quoted newline characters, you
- // must also set the allowQuotedNewlines property to true.
- Quote string `json:"quote,omitempty"`
-
- // Schema: [Optional] The schema for the destination table. The schema
- // can be omitted if the destination table already exists or if the
- // schema can be inferred from the loaded data.
- Schema *TableSchema `json:"schema,omitempty"`
-
- // SchemaInline: [Deprecated] The inline schema. For CSV schemas,
- // specify as "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING,
- // bar:INTEGER, baz:FLOAT".
- SchemaInline string `json:"schemaInline,omitempty"`
-
- // SchemaInlineFormat: [Deprecated] The format of the schemaInline
- // property.
- SchemaInlineFormat string `json:"schemaInlineFormat,omitempty"`
-
- // SkipLeadingRows: [Optional] The number of rows at the top of a CSV
- // file that BigQuery will skip when loading the data. The default value
- // is 0. This property is useful if you have header rows in the file
- // that should be skipped.
- SkipLeadingRows int64 `json:"skipLeadingRows,omitempty"`
-
- // SourceFormat: [Optional] The format of the data files. For CSV files,
- // specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For
- // newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". The default
- // value is CSV.
- SourceFormat string `json:"sourceFormat,omitempty"`
-
- // SourceUris: [Required] The fully-qualified URIs that point to your
- // data in Google Cloud Storage. Each URI can contain one '*' wildcard
- // character and it must come after the 'bucket' name.
- SourceUris []string `json:"sourceUris,omitempty"`
-
- // WriteDisposition: [Optional] Specifies the action that occurs if the
- // destination table already exists. The following values are supported:
- // WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
- // table data. WRITE_APPEND: If the table already exists, BigQuery
- // appends the data to the table. WRITE_EMPTY: If the table already
- // exists and contains data, a 'duplicate' error is returned in the job
- // result. The default value is WRITE_EMPTY. Each action is atomic and
- // only occurs if BigQuery is able to complete the job successfully.
- // Creation, truncation and append actions occur as one atomic update
- // upon job completion.
- WriteDisposition string `json:"writeDisposition,omitempty"`
-}
-
-type JobConfigurationQuery struct {
- // AllowLargeResults: If true, allows the query to produce arbitrarily
- // large result tables at a slight cost in performance. Requires
- // destinationTable to be set.
- AllowLargeResults bool `json:"allowLargeResults,omitempty"`
-
- // CreateDisposition: [Optional] Specifies whether the job is allowed to
- // create new tables. The following values are supported:
- // CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the
- // table. CREATE_NEVER: The table must already exist. If it does not, a
- // 'notFound' error is returned in the job result. The default value is
- // CREATE_IF_NEEDED. Creation, truncation and append actions occur as
- // one atomic update upon job completion.
- CreateDisposition string `json:"createDisposition,omitempty"`
-
- // DefaultDataset: [Optional] Specifies the default dataset to use for
- // unqualified table names in the query.
- DefaultDataset *DatasetReference `json:"defaultDataset,omitempty"`
-
- // DestinationTable: [Optional] Describes the table where the query
- // results should be stored. If not present, a new table will be created
- // to store the results.
- DestinationTable *TableReference `json:"destinationTable,omitempty"`
-
- // FlattenResults: [Optional] Flattens all nested and repeated fields in
- // the query results. The default value is true. allowLargeResults must
- // be true if this is set to false.
- FlattenResults bool `json:"flattenResults,omitempty"`
-
- // PreserveNulls: [Deprecated] This property is deprecated.
- PreserveNulls bool `json:"preserveNulls,omitempty"`
-
- // Priority: [Optional] Specifies a priority for the query. Possible
- // values include INTERACTIVE and BATCH. The default value is
- // INTERACTIVE.
- Priority string `json:"priority,omitempty"`
-
- // Query: [Required] BigQuery SQL query to execute.
- Query string `json:"query,omitempty"`
-
- // TableDefinitions: [Experimental] If querying an external data source
- // outside of BigQuery, describes the data format, location and other
- // properties of the data source. By defining these properties, the data
- // source can then be queried as if it were a standard BigQuery table.
- TableDefinitions map[string]ExternalDataConfiguration `json:"tableDefinitions,omitempty"`
-
- // UseQueryCache: [Optional] Whether to look for the result in the query
- // cache. The query cache is a best-effort cache that will be flushed
- // whenever tables in the query are modified. Moreover, the query cache
- // is only available when a query does not have a destination table
- // specified.
- UseQueryCache bool `json:"useQueryCache,omitempty"`
-
- // WriteDisposition: [Optional] Specifies the action that occurs if the
- // destination table already exists. The following values are supported:
- // WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
- // table data. WRITE_APPEND: If the table already exists, BigQuery
- // appends the data to the table. WRITE_EMPTY: If the table already
- // exists and contains data, a 'duplicate' error is returned in the job
- // result. The default value is WRITE_EMPTY. Each action is atomic and
- // only occurs if BigQuery is able to complete the job successfully.
- // Creation, truncation and append actions occur as one atomic update
- // upon job completion.
- WriteDisposition string `json:"writeDisposition,omitempty"`
-}
-
-type JobConfigurationTableCopy struct {
- // CreateDisposition: [Optional] Specifies whether the job is allowed to
- // create new tables. The following values are supported:
- // CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the
- // table. CREATE_NEVER: The table must already exist. If it does not, a
- // 'notFound' error is returned in the job result. The default value is
- // CREATE_IF_NEEDED. Creation, truncation and append actions occur as
- // one atomic update upon job completion.
- CreateDisposition string `json:"createDisposition,omitempty"`
-
- // DestinationTable: [Required] The destination table
- DestinationTable *TableReference `json:"destinationTable,omitempty"`
-
- // SourceTable: [Pick one] Source table to copy.
- SourceTable *TableReference `json:"sourceTable,omitempty"`
-
- // SourceTables: [Pick one] Source tables to copy.
- SourceTables []*TableReference `json:"sourceTables,omitempty"`
-
- // WriteDisposition: [Optional] Specifies the action that occurs if the
- // destination table already exists. The following values are supported:
- // WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
- // table data. WRITE_APPEND: If the table already exists, BigQuery
- // appends the data to the table. WRITE_EMPTY: If the table already
- // exists and contains data, a 'duplicate' error is returned in the job
- // result. The default value is WRITE_EMPTY. Each action is atomic and
- // only occurs if BigQuery is able to complete the job successfully.
- // Creation, truncation and append actions occur as one atomic update
- // upon job completion.
- WriteDisposition string `json:"writeDisposition,omitempty"`
-}
-
-type JobList struct {
- // Etag: A hash of this page of results.
- Etag string `json:"etag,omitempty"`
-
- // Jobs: List of jobs that were requested.
- Jobs []*JobListJobs `json:"jobs,omitempty"`
-
- // Kind: The resource type of the response.
- Kind string `json:"kind,omitempty"`
-
- // NextPageToken: A token to request the next page of results.
- NextPageToken string `json:"nextPageToken,omitempty"`
-
- // TotalItems: Total number of jobs in this collection.
- TotalItems int64 `json:"totalItems,omitempty"`
-}
-
-type JobListJobs struct {
- // Configuration: [Full-projection-only] Specifies the job
- // configuration.
- Configuration *JobConfiguration `json:"configuration,omitempty"`
-
- // ErrorResult: A result object that will be present only if the job has
- // failed.
- ErrorResult *ErrorProto `json:"errorResult,omitempty"`
-
- // Id: Unique opaque ID of the job.
- Id string `json:"id,omitempty"`
-
- // JobReference: Job reference uniquely identifying the job.
- JobReference *JobReference `json:"jobReference,omitempty"`
-
- // Kind: The resource type.
- Kind string `json:"kind,omitempty"`
-
- // State: Running state of the job. When the state is DONE, errorResult
- // can be checked to determine whether the job succeeded or failed.
- State string `json:"state,omitempty"`
-
- // Statistics: [Output-only] Information about the job, including
- // starting time and ending time of the job.
- Statistics *JobStatistics `json:"statistics,omitempty"`
-
- // Status: [Full-projection-only] Describes the state of the job.
- Status *JobStatus `json:"status,omitempty"`
-
- // UserEmail: [Full-projection-only] Email address of the user who ran
- // the job.
- UserEmail string `json:"user_email,omitempty"`
-}
-
-type JobReference struct {
- // JobId: [Required] The ID of the job. The ID must contain only letters
- // (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The
- // maximum length is 1,024 characters.
- JobId string `json:"jobId,omitempty"`
-
- // ProjectId: [Required] The ID of the project containing this job.
- ProjectId string `json:"projectId,omitempty"`
-}
-
-type JobStatistics struct {
- // CreationTime: [Output-only] Creation time of this job, in
- // milliseconds since the epoch. This field will be present on all jobs.
- CreationTime int64 `json:"creationTime,omitempty,string"`
-
- // EndTime: [Output-only] End time of this job, in milliseconds since
- // the epoch. This field will be present whenever a job is in the DONE
- // state.
- EndTime int64 `json:"endTime,omitempty,string"`
-
- // Extract: [Output-only] Statistics for an extract job.
- Extract *JobStatistics4 `json:"extract,omitempty"`
-
- // Load: [Output-only] Statistics for a load job.
- Load *JobStatistics3 `json:"load,omitempty"`
-
- // Query: [Output-only] Statistics for a query job.
- Query *JobStatistics2 `json:"query,omitempty"`
-
- // StartTime: [Output-only] Start time of this job, in milliseconds
- // since the epoch. This field will be present when the job transitions
- // from the PENDING state to either RUNNING or DONE.
- StartTime int64 `json:"startTime,omitempty,string"`
-
- // TotalBytesProcessed: [Output-only] [Deprecated] Use the bytes
- // processed in the query statistics instead.
- TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"`
-}
-
-type JobStatistics2 struct {
- // CacheHit: [Output-only] Whether the query result was fetched from the
- // query cache.
- CacheHit bool `json:"cacheHit,omitempty"`
-
- // TotalBytesProcessed: [Output-only] Total bytes processed for this
- // job.
- TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"`
-}
-
-type JobStatistics3 struct {
- // InputFileBytes: [Output-only] Number of bytes of source data in a
- // joad job.
- InputFileBytes int64 `json:"inputFileBytes,omitempty,string"`
-
- // InputFiles: [Output-only] Number of source files in a load job.
- InputFiles int64 `json:"inputFiles,omitempty,string"`
-
- // OutputBytes: [Output-only] Size of the loaded data in bytes. Note
- // that while an import job is in the running state, this value may
- // change.
- OutputBytes int64 `json:"outputBytes,omitempty,string"`
-
- // OutputRows: [Output-only] Number of rows imported in a load job. Note
- // that while an import job is in the running state, this value may
- // change.
- OutputRows int64 `json:"outputRows,omitempty,string"`
-}
-
-type JobStatistics4 struct {
- // DestinationUriFileCounts: [Experimental] Number of files per
- // destination URI or URI pattern specified in the extract
- // configuration. These values will be in the same order as the URIs
- // specified in the 'destinationUris' field.
- DestinationUriFileCounts googleapi.Int64s `json:"destinationUriFileCounts,omitempty"`
-}
-
-type JobStatus struct {
- // ErrorResult: [Output-only] Final error result of the job. If present,
- // indicates that the job has completed and was unsuccessful.
- ErrorResult *ErrorProto `json:"errorResult,omitempty"`
-
- // Errors: [Output-only] All errors encountered during the running of
- // the job. Errors here do not necessarily mean that the job has
- // completed or was unsuccessful.
- Errors []*ErrorProto `json:"errors,omitempty"`
-
- // State: [Output-only] Running state of the job.
- State string `json:"state,omitempty"`
-}
-
-type JsonValue interface{}
-
-type ProjectList struct {
- // Etag: A hash of the page of results
- Etag string `json:"etag,omitempty"`
-
- // Kind: The type of list.
- Kind string `json:"kind,omitempty"`
-
- // NextPageToken: A token to request the next page of results.
- NextPageToken string `json:"nextPageToken,omitempty"`
-
- // Projects: Projects to which you have at least READ access.
- Projects []*ProjectListProjects `json:"projects,omitempty"`
-
- // TotalItems: The total number of projects in the list.
- TotalItems int64 `json:"totalItems,omitempty"`
-}
-
-type ProjectListProjects struct {
- // FriendlyName: A descriptive name for this project.
- FriendlyName string `json:"friendlyName,omitempty"`
-
- // Id: An opaque ID of this project.
- Id string `json:"id,omitempty"`
-
- // Kind: The resource type.
- Kind string `json:"kind,omitempty"`
-
- // NumericId: The numeric ID of this project.
- NumericId uint64 `json:"numericId,omitempty,string"`
-
- // ProjectReference: A unique reference to this project.
- ProjectReference *ProjectReference `json:"projectReference,omitempty"`
-}
-
-type ProjectReference struct {
- // ProjectId: [Required] ID of the project. Can be either the numeric ID
- // or the assigned ID of the project.
- ProjectId string `json:"projectId,omitempty"`
-}
-
-type QueryRequest struct {
- // DefaultDataset: [Optional] Specifies the default datasetId and
- // projectId to assume for any unqualified table names in the query. If
- // not set, all table names in the query string must be qualified in the
- // format 'datasetId.tableId'.
- DefaultDataset *DatasetReference `json:"defaultDataset,omitempty"`
-
- // DryRun: [Optional] If set, don't actually run this job. A valid query
- // will return a mostly empty response with some processing statistics,
- // while an invalid query will return the same error it would if it
- // wasn't a dry run.
- DryRun bool `json:"dryRun,omitempty"`
-
- // Kind: The resource type of the request.
- Kind string `json:"kind,omitempty"`
-
- // MaxResults: [Optional] The maximum number of rows of data to return
- // per page of results. Setting this flag to a small value such as 1000
- // and then paging through results might improve reliability when the
- // query result set is large. In addition to this limit, responses are
- // also limited to 10 MB. By default, there is no maximum row count, and
- // only the byte limit applies.
- MaxResults int64 `json:"maxResults,omitempty"`
-
- // PreserveNulls: [Deprecated] This property is deprecated.
- PreserveNulls bool `json:"preserveNulls,omitempty"`
-
- // Query: [Required] A query string, following the BigQuery query
- // syntax, of the query to execute. Example: "SELECT count(f1) FROM
- // [myProjectId:myDatasetId.myTableId]".
- Query string `json:"query,omitempty"`
-
- // TimeoutMs: [Optional] How long to wait for the query to complete, in
- // milliseconds, before the request times out and returns. Note that
- // this is only a timeout for the request, not the query. If the query
- // takes longer to run than the timeout value, the call returns without
- // any results and with the 'jobComplete' flag set to false. You can
- // call GetQueryResults() to wait for the query to complete and read the
- // results. The default value is 10000 milliseconds (10 seconds).
- TimeoutMs int64 `json:"timeoutMs,omitempty"`
-
- // UseQueryCache: [Optional] Whether to look for the result in the query
- // cache. The query cache is a best-effort cache that will be flushed
- // whenever tables in the query are modified. The default value is true.
- UseQueryCache bool `json:"useQueryCache,omitempty"`
-}
-
-type QueryResponse struct {
- // CacheHit: Whether the query result was fetched from the query cache.
- CacheHit bool `json:"cacheHit,omitempty"`
-
- // JobComplete: Whether the query has completed or not. If rows or
- // totalRows are present, this will always be true. If this is false,
- // totalRows will not be available.
- JobComplete bool `json:"jobComplete,omitempty"`
-
- // JobReference: Reference to the Job that was created to run the query.
- // This field will be present even if the original request timed out, in
- // which case GetQueryResults can be used to read the results once the
- // query has completed. Since this API only returns the first page of
- // results, subsequent pages can be fetched via the same mechanism
- // (GetQueryResults).
- JobReference *JobReference `json:"jobReference,omitempty"`
-
- // Kind: The resource type.
- Kind string `json:"kind,omitempty"`
-
- // PageToken: A token used for paging results.
- PageToken string `json:"pageToken,omitempty"`
-
- // Rows: An object with as many results as can be contained within the
- // maximum permitted reply size. To get any additional rows, you can
- // call GetQueryResults and specify the jobReference returned above.
- Rows []*TableRow `json:"rows,omitempty"`
-
- // Schema: The schema of the results. Present only when the query
- // completes successfully.
- Schema *TableSchema `json:"schema,omitempty"`
-
- // TotalBytesProcessed: The total number of bytes processed for this
- // query. If this query was a dry run, this is the number of bytes that
- // would be processed if the query were run.
- TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"`
-
- // TotalRows: The total number of rows in the complete query result set,
- // which can be more than the number of rows in this single page of
- // results.
- TotalRows uint64 `json:"totalRows,omitempty,string"`
-}
-
-type Table struct {
- // CreationTime: [Output-only] The time when this table was created, in
- // milliseconds since the epoch.
- CreationTime int64 `json:"creationTime,omitempty,string"`
-
- // Description: [Optional] A user-friendly description of this table.
- Description string `json:"description,omitempty"`
-
- // Etag: [Output-only] A hash of this resource.
- Etag string `json:"etag,omitempty"`
-
- // ExpirationTime: [Optional] The time when this table expires, in
- // milliseconds since the epoch. If not present, the table will persist
- // indefinitely. Expired tables will be deleted and their storage
- // reclaimed.
- ExpirationTime int64 `json:"expirationTime,omitempty,string"`
-
- // FriendlyName: [Optional] A descriptive name for this table.
- FriendlyName string `json:"friendlyName,omitempty"`
-
- // Id: [Output-only] An opaque ID uniquely identifying the table.
- Id string `json:"id,omitempty"`
-
- // Kind: [Output-only] The type of the resource.
- Kind string `json:"kind,omitempty"`
-
- // LastModifiedTime: [Output-only] The time when this table was last
- // modified, in milliseconds since the epoch.
- LastModifiedTime uint64 `json:"lastModifiedTime,omitempty,string"`
-
- // NumBytes: [Output-only] The size of the table in bytes. This property
- // is unavailable for tables that are actively receiving streaming
- // inserts.
- NumBytes int64 `json:"numBytes,omitempty,string"`
-
- // NumRows: [Output-only] The number of rows of data in this table. This
- // property is unavailable for tables that are actively receiving
- // streaming inserts.
- NumRows uint64 `json:"numRows,omitempty,string"`
-
- // Schema: [Optional] Describes the schema of this table.
- Schema *TableSchema `json:"schema,omitempty"`
-
- // SelfLink: [Output-only] A URL that can be used to access this
- // resource again.
- SelfLink string `json:"selfLink,omitempty"`
-
- // TableReference: [Required] Reference describing the ID of this table.
- TableReference *TableReference `json:"tableReference,omitempty"`
-
- // Type: [Output-only] Describes the table type. The following values
- // are supported: TABLE: A normal BigQuery table. VIEW: A virtual table
- // defined by a SQL query. The default value is TABLE.
- Type string `json:"type,omitempty"`
-
- // View: [Optional] The view definition.
- View *ViewDefinition `json:"view,omitempty"`
-}
-
-type TableCell struct {
- V interface{} `json:"v,omitempty"`
-}
-
-type TableDataInsertAllRequest struct {
- // IgnoreUnknownValues: [Optional] Accept rows that contain values that
- // do not match the schema. The unknown values are ignored. Default is
- // false, which treats unknown values as errors.
- IgnoreUnknownValues bool `json:"ignoreUnknownValues,omitempty"`
-
- // Kind: The resource type of the response.
- Kind string `json:"kind,omitempty"`
-
- // Rows: The rows to insert.
- Rows []*TableDataInsertAllRequestRows `json:"rows,omitempty"`
-
- // SkipInvalidRows: [Optional] Insert all valid rows of a request, even
- // if invalid rows exist. The default value is false, which causes the
- // entire request to fail if any invalid rows exist.
- SkipInvalidRows bool `json:"skipInvalidRows,omitempty"`
-}
-
-type TableDataInsertAllRequestRows struct {
- // InsertId: [Optional] A unique ID for each row. BigQuery uses this
- // property to detect duplicate insertion requests on a best-effort
- // basis.
- InsertId string `json:"insertId,omitempty"`
-
- // Json: [Required] A JSON object that contains a row of data. The
- // object's properties and values must match the destination table's
- // schema.
- Json map[string]JsonValue `json:"json,omitempty"`
-}
-
-type TableDataInsertAllResponse struct {
- // InsertErrors: An array of errors for rows that were not inserted.
- InsertErrors []*TableDataInsertAllResponseInsertErrors `json:"insertErrors,omitempty"`
-
- // Kind: The resource type of the response.
- Kind string `json:"kind,omitempty"`
-}
-
-type TableDataInsertAllResponseInsertErrors struct {
- // Errors: Error information for the row indicated by the index
- // property.
- Errors []*ErrorProto `json:"errors,omitempty"`
-
- // Index: The index of the row that error applies to.
- Index int64 `json:"index,omitempty"`
-}
-
-type TableDataList struct {
- // Etag: A hash of this page of results.
- Etag string `json:"etag,omitempty"`
-
- // Kind: The resource type of the response.
- Kind string `json:"kind,omitempty"`
-
- // PageToken: A token used for paging results. Providing this token
- // instead of the startIndex parameter can help you retrieve stable
- // results when an underlying table is changing.
- PageToken string `json:"pageToken,omitempty"`
-
- // Rows: Rows of results.
- Rows []*TableRow `json:"rows,omitempty"`
-
- // TotalRows: The total number of rows in the complete table.
- TotalRows int64 `json:"totalRows,omitempty,string"`
-}
-
-type TableFieldSchema struct {
- // Description: [Optional] The field description. The maximum length is
- // 16K characters.
- Description string `json:"description,omitempty"`
-
- // Fields: [Optional] Describes the nested schema fields if the type
- // property is set to RECORD.
- Fields []*TableFieldSchema `json:"fields,omitempty"`
-
- // Mode: [Optional] The field mode. Possible values include NULLABLE,
- // REQUIRED and REPEATED. The default value is NULLABLE.
- Mode string `json:"mode,omitempty"`
-
- // Name: [Required] The field name. The name must contain only letters
- // (a-z, A-Z), numbers (0-9), or underscores (_), and must start with a
- // letter or underscore. The maximum length is 128 characters.
- Name string `json:"name,omitempty"`
-
- // Type: [Required] The field data type. Possible values include STRING,
- // INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates
- // that the field contains a nested schema).
- Type string `json:"type,omitempty"`
-}
-
-type TableList struct {
- // Etag: A hash of this page of results.
- Etag string `json:"etag,omitempty"`
-
- // Kind: The type of list.
- Kind string `json:"kind,omitempty"`
-
- // NextPageToken: A token to request the next page of results.
- NextPageToken string `json:"nextPageToken,omitempty"`
-
- // Tables: Tables in the requested dataset.
- Tables []*TableListTables `json:"tables,omitempty"`
-
- // TotalItems: The total number of tables in the dataset.
- TotalItems int64 `json:"totalItems,omitempty"`
-}
-
-type TableListTables struct {
- // FriendlyName: The user-friendly name for this table.
- FriendlyName string `json:"friendlyName,omitempty"`
-
- // Id: An opaque ID of the table
- Id string `json:"id,omitempty"`
-
- // Kind: The resource type.
- Kind string `json:"kind,omitempty"`
-
- // TableReference: A reference uniquely identifying the table.
- TableReference *TableReference `json:"tableReference,omitempty"`
-
- // Type: The type of table. Possible values are: TABLE, VIEW.
- Type string `json:"type,omitempty"`
-}
-
-type TableReference struct {
- // DatasetId: [Required] The ID of the dataset containing this table.
- DatasetId string `json:"datasetId,omitempty"`
-
- // ProjectId: [Required] The ID of the project containing this table.
- ProjectId string `json:"projectId,omitempty"`
-
- // TableId: [Required] The ID of the table. The ID must contain only
- // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum
- // length is 1,024 characters.
- TableId string `json:"tableId,omitempty"`
-}
-
-type TableRow struct {
- F []*TableCell `json:"f,omitempty"`
-}
-
-type TableSchema struct {
- // Fields: Describes the fields in a table.
- Fields []*TableFieldSchema `json:"fields,omitempty"`
-}
-
-type ViewDefinition struct {
- // Query: [Required] A query that BigQuery executes when the view is
- // referenced.
- Query string `json:"query,omitempty"`
-}
-
-// method id "bigquery.datasets.delete":
-
-type DatasetsDeleteCall struct {
- s *Service
- projectId string
- datasetId string
- opt_ map[string]interface{}
-}
-
-// Delete: Deletes the dataset specified by the datasetId value. Before
-// you can delete a dataset, you must delete all its tables, either
-// manually or by specifying deleteContents. Immediately after deletion,
-// you can create another dataset with the same name.
-func (r *DatasetsService) Delete(projectId string, datasetId string) *DatasetsDeleteCall {
- c := &DatasetsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.datasetId = datasetId
- return c
-}
-
-// DeleteContents sets the optional parameter "deleteContents": If True,
-// delete all the tables in the dataset. If False and the dataset
-// contains tables, the request will fail. Default is False
-func (c *DatasetsDeleteCall) DeleteContents(deleteContents bool) *DatasetsDeleteCall {
- c.opt_["deleteContents"] = deleteContents
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *DatasetsDeleteCall) Fields(s ...googleapi.Field) *DatasetsDeleteCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *DatasetsDeleteCall) Do() error {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["deleteContents"]; ok {
- params.Set("deleteContents", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("DELETE", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "datasetId": c.datasetId,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return err
- }
- return nil
- // {
- // "description": "Deletes the dataset specified by the datasetId value. Before you can delete a dataset, you must delete all its tables, either manually or by specifying deleteContents. Immediately after deletion, you can create another dataset with the same name.",
- // "httpMethod": "DELETE",
- // "id": "bigquery.datasets.delete",
- // "parameterOrder": [
- // "projectId",
- // "datasetId"
- // ],
- // "parameters": {
- // "datasetId": {
- // "description": "Dataset ID of dataset being deleted",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "deleteContents": {
- // "description": "If True, delete all the tables in the dataset. If False and the dataset contains tables, the request will fail. Default is False",
- // "location": "query",
- // "type": "boolean"
- // },
- // "projectId": {
- // "description": "Project ID of the dataset being deleted",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "projects/{projectId}/datasets/{datasetId}",
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "bigquery.datasets.get":
-
-type DatasetsGetCall struct {
- s *Service
- projectId string
- datasetId string
- opt_ map[string]interface{}
-}
-
-// Get: Returns the dataset specified by datasetID.
-func (r *DatasetsService) Get(projectId string, datasetId string) *DatasetsGetCall {
- c := &DatasetsGetCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.datasetId = datasetId
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *DatasetsGetCall) Fields(s ...googleapi.Field) *DatasetsGetCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *DatasetsGetCall) Do() (*Dataset, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "datasetId": c.datasetId,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Dataset
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Returns the dataset specified by datasetID.",
- // "httpMethod": "GET",
- // "id": "bigquery.datasets.get",
- // "parameterOrder": [
- // "projectId",
- // "datasetId"
- // ],
- // "parameters": {
- // "datasetId": {
- // "description": "Dataset ID of the requested dataset",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "projectId": {
- // "description": "Project ID of the requested dataset",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "projects/{projectId}/datasets/{datasetId}",
- // "response": {
- // "$ref": "Dataset"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "bigquery.datasets.insert":
-
-type DatasetsInsertCall struct {
- s *Service
- projectId string
- dataset *Dataset
- opt_ map[string]interface{}
-}
-
-// Insert: Creates a new empty dataset.
-func (r *DatasetsService) Insert(projectId string, dataset *Dataset) *DatasetsInsertCall {
- c := &DatasetsInsertCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.dataset = dataset
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *DatasetsInsertCall) Fields(s ...googleapi.Field) *DatasetsInsertCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *DatasetsInsertCall) Do() (*Dataset, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Dataset
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Creates a new empty dataset.",
- // "httpMethod": "POST",
- // "id": "bigquery.datasets.insert",
- // "parameterOrder": [
- // "projectId"
- // ],
- // "parameters": {
- // "projectId": {
- // "description": "Project ID of the new dataset",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "projects/{projectId}/datasets",
- // "request": {
- // "$ref": "Dataset"
- // },
- // "response": {
- // "$ref": "Dataset"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "bigquery.datasets.list":
-
-type DatasetsListCall struct {
- s *Service
- projectId string
- opt_ map[string]interface{}
-}
-
-// List: Lists all datasets in the specified project to which you have
-// been granted the READER dataset role.
-func (r *DatasetsService) List(projectId string) *DatasetsListCall {
- c := &DatasetsListCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- return c
-}
-
-// All sets the optional parameter "all": Whether to list all datasets,
-// including hidden ones
-func (c *DatasetsListCall) All(all bool) *DatasetsListCall {
- c.opt_["all"] = all
- return c
-}
-
-// MaxResults sets the optional parameter "maxResults": The maximum
-// number of results to return
-func (c *DatasetsListCall) MaxResults(maxResults int64) *DatasetsListCall {
- c.opt_["maxResults"] = maxResults
- return c
-}
-
-// PageToken sets the optional parameter "pageToken": Page token,
-// returned by a previous call, to request the next page of results
-func (c *DatasetsListCall) PageToken(pageToken string) *DatasetsListCall {
- c.opt_["pageToken"] = pageToken
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *DatasetsListCall) Fields(s ...googleapi.Field) *DatasetsListCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *DatasetsListCall) Do() (*DatasetList, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["all"]; ok {
- params.Set("all", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["maxResults"]; ok {
- params.Set("maxResults", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["pageToken"]; ok {
- params.Set("pageToken", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *DatasetList
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Lists all datasets in the specified project to which you have been granted the READER dataset role.",
- // "httpMethod": "GET",
- // "id": "bigquery.datasets.list",
- // "parameterOrder": [
- // "projectId"
- // ],
- // "parameters": {
- // "all": {
- // "description": "Whether to list all datasets, including hidden ones",
- // "location": "query",
- // "type": "boolean"
- // },
- // "maxResults": {
- // "description": "The maximum number of results to return",
- // "format": "uint32",
- // "location": "query",
- // "type": "integer"
- // },
- // "pageToken": {
- // "description": "Page token, returned by a previous call, to request the next page of results",
- // "location": "query",
- // "type": "string"
- // },
- // "projectId": {
- // "description": "Project ID of the datasets to be listed",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "projects/{projectId}/datasets",
- // "response": {
- // "$ref": "DatasetList"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "bigquery.datasets.patch":
-
-type DatasetsPatchCall struct {
- s *Service
- projectId string
- datasetId string
- dataset *Dataset
- opt_ map[string]interface{}
-}
-
-// Patch: Updates information in an existing dataset. The update method
-// replaces the entire dataset resource, whereas the patch method only
-// replaces fields that are provided in the submitted dataset resource.
-// This method supports patch semantics.
-func (r *DatasetsService) Patch(projectId string, datasetId string, dataset *Dataset) *DatasetsPatchCall {
- c := &DatasetsPatchCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.datasetId = datasetId
- c.dataset = dataset
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *DatasetsPatchCall) Fields(s ...googleapi.Field) *DatasetsPatchCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *DatasetsPatchCall) Do() (*Dataset, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("PATCH", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "datasetId": c.datasetId,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Dataset
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource. This method supports patch semantics.",
- // "httpMethod": "PATCH",
- // "id": "bigquery.datasets.patch",
- // "parameterOrder": [
- // "projectId",
- // "datasetId"
- // ],
- // "parameters": {
- // "datasetId": {
- // "description": "Dataset ID of the dataset being updated",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "projectId": {
- // "description": "Project ID of the dataset being updated",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "projects/{projectId}/datasets/{datasetId}",
- // "request": {
- // "$ref": "Dataset"
- // },
- // "response": {
- // "$ref": "Dataset"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "bigquery.datasets.update":
-
-type DatasetsUpdateCall struct {
- s *Service
- projectId string
- datasetId string
- dataset *Dataset
- opt_ map[string]interface{}
-}
-
-// Update: Updates information in an existing dataset. The update method
-// replaces the entire dataset resource, whereas the patch method only
-// replaces fields that are provided in the submitted dataset resource.
-func (r *DatasetsService) Update(projectId string, datasetId string, dataset *Dataset) *DatasetsUpdateCall {
- c := &DatasetsUpdateCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.datasetId = datasetId
- c.dataset = dataset
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *DatasetsUpdateCall) Fields(s ...googleapi.Field) *DatasetsUpdateCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *DatasetsUpdateCall) Do() (*Dataset, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.dataset)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("PUT", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "datasetId": c.datasetId,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Dataset
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates information in an existing dataset. The update method replaces the entire dataset resource, whereas the patch method only replaces fields that are provided in the submitted dataset resource.",
- // "httpMethod": "PUT",
- // "id": "bigquery.datasets.update",
- // "parameterOrder": [
- // "projectId",
- // "datasetId"
- // ],
- // "parameters": {
- // "datasetId": {
- // "description": "Dataset ID of the dataset being updated",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "projectId": {
- // "description": "Project ID of the dataset being updated",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "projects/{projectId}/datasets/{datasetId}",
- // "request": {
- // "$ref": "Dataset"
- // },
- // "response": {
- // "$ref": "Dataset"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "bigquery.jobs.get":
-
-type JobsGetCall struct {
- s *Service
- projectId string
- jobId string
- opt_ map[string]interface{}
-}
-
-// Get: Returns information about a specific job. Job information is
-// available for a six month period after creation. Requires that you're
-// the person who ran the job, or have the Is Owner project role.
-func (r *JobsService) Get(projectId string, jobId string) *JobsGetCall {
- c := &JobsGetCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.jobId = jobId
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *JobsGetCall) Fields(s ...googleapi.Field) *JobsGetCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *JobsGetCall) Do() (*Job, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/jobs/{jobId}")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "jobId": c.jobId,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Job
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Returns information about a specific job. Job information is available for a six month period after creation. Requires that you're the person who ran the job, or have the Is Owner project role.",
- // "httpMethod": "GET",
- // "id": "bigquery.jobs.get",
- // "parameterOrder": [
- // "projectId",
- // "jobId"
- // ],
- // "parameters": {
- // "jobId": {
- // "description": "Job ID of the requested job",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "projectId": {
- // "description": "Project ID of the requested job",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "projects/{projectId}/jobs/{jobId}",
- // "response": {
- // "$ref": "Job"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "bigquery.jobs.getQueryResults":
-
-type JobsGetQueryResultsCall struct {
- s *Service
- projectId string
- jobId string
- opt_ map[string]interface{}
-}
-
-// GetQueryResults: Retrieves the results of a query job.
-func (r *JobsService) GetQueryResults(projectId string, jobId string) *JobsGetQueryResultsCall {
- c := &JobsGetQueryResultsCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.jobId = jobId
- return c
-}
-
-// MaxResults sets the optional parameter "maxResults": Maximum number
-// of results to read
-func (c *JobsGetQueryResultsCall) MaxResults(maxResults int64) *JobsGetQueryResultsCall {
- c.opt_["maxResults"] = maxResults
- return c
-}
-
-// PageToken sets the optional parameter "pageToken": Page token,
-// returned by a previous call, to request the next page of results
-func (c *JobsGetQueryResultsCall) PageToken(pageToken string) *JobsGetQueryResultsCall {
- c.opt_["pageToken"] = pageToken
- return c
-}
-
-// StartIndex sets the optional parameter "startIndex": Zero-based index
-// of the starting row
-func (c *JobsGetQueryResultsCall) StartIndex(startIndex uint64) *JobsGetQueryResultsCall {
- c.opt_["startIndex"] = startIndex
- return c
-}
-
-// TimeoutMs sets the optional parameter "timeoutMs": How long to wait
-// for the query to complete, in milliseconds, before returning. Default
-// is to return immediately. If the timeout passes before the job
-// completes, the request will fail with a TIMEOUT error
-func (c *JobsGetQueryResultsCall) TimeoutMs(timeoutMs int64) *JobsGetQueryResultsCall {
- c.opt_["timeoutMs"] = timeoutMs
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *JobsGetQueryResultsCall) Fields(s ...googleapi.Field) *JobsGetQueryResultsCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *JobsGetQueryResultsCall) Do() (*GetQueryResultsResponse, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["maxResults"]; ok {
- params.Set("maxResults", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["pageToken"]; ok {
- params.Set("pageToken", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["startIndex"]; ok {
- params.Set("startIndex", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["timeoutMs"]; ok {
- params.Set("timeoutMs", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/queries/{jobId}")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "jobId": c.jobId,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *GetQueryResultsResponse
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Retrieves the results of a query job.",
- // "httpMethod": "GET",
- // "id": "bigquery.jobs.getQueryResults",
- // "parameterOrder": [
- // "projectId",
- // "jobId"
- // ],
- // "parameters": {
- // "jobId": {
- // "description": "Job ID of the query job",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "maxResults": {
- // "description": "Maximum number of results to read",
- // "format": "uint32",
- // "location": "query",
- // "type": "integer"
- // },
- // "pageToken": {
- // "description": "Page token, returned by a previous call, to request the next page of results",
- // "location": "query",
- // "type": "string"
- // },
- // "projectId": {
- // "description": "Project ID of the query job",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "startIndex": {
- // "description": "Zero-based index of the starting row",
- // "format": "uint64",
- // "location": "query",
- // "type": "string"
- // },
- // "timeoutMs": {
- // "description": "How long to wait for the query to complete, in milliseconds, before returning. Default is to return immediately. If the timeout passes before the job completes, the request will fail with a TIMEOUT error",
- // "format": "uint32",
- // "location": "query",
- // "type": "integer"
- // }
- // },
- // "path": "projects/{projectId}/queries/{jobId}",
- // "response": {
- // "$ref": "GetQueryResultsResponse"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "bigquery.jobs.insert":
-
-type JobsInsertCall struct {
- s *Service
- projectId string
- job *Job
- opt_ map[string]interface{}
- media_ io.Reader
- resumable_ googleapi.SizeReaderAt
- mediaType_ string
- ctx_ context.Context
- protocol_ string
-}
-
-// Insert: Starts a new asynchronous job. Requires the Can View project
-// role.
-func (r *JobsService) Insert(projectId string, job *Job) *JobsInsertCall {
- c := &JobsInsertCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.job = job
- return c
-}
-
-// Media specifies the media to upload in a single chunk.
-// At most one of Media and ResumableMedia may be set.
-func (c *JobsInsertCall) Media(r io.Reader) *JobsInsertCall {
- c.media_ = r
- c.protocol_ = "multipart"
- return c
-}
-
-// ResumableMedia specifies the media to upload in chunks and can be cancelled with ctx.
-// At most one of Media and ResumableMedia may be set.
-// mediaType identifies the MIME media type of the upload, such as "image/png".
-// If mediaType is "", it will be auto-detected.
-func (c *JobsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *JobsInsertCall {
- c.ctx_ = ctx
- c.resumable_ = io.NewSectionReader(r, 0, size)
- c.mediaType_ = mediaType
- c.protocol_ = "resumable"
- return c
-}
-
-// ProgressUpdater provides a callback function that will be called after every chunk.
-// It should be a low-latency function in order to not slow down the upload operation.
-// This should only be called when using ResumableMedia (as opposed to Media).
-func (c *JobsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *JobsInsertCall {
- c.opt_["progressUpdater"] = pu
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *JobsInsertCall) Fields(s ...googleapi.Field) *JobsInsertCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *JobsInsertCall) Do() (*Job, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.job)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/jobs")
- var progressUpdater_ googleapi.ProgressUpdater
- if v, ok := c.opt_["progressUpdater"]; ok {
- if pu, ok := v.(googleapi.ProgressUpdater); ok {
- progressUpdater_ = pu
- }
- }
- if c.media_ != nil || c.resumable_ != nil {
- urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1)
- params.Set("uploadType", c.protocol_)
- }
- urls += "?" + params.Encode()
- if c.protocol_ != "resumable" {
- var cancel func()
- cancel, _ = googleapi.ConditionallyIncludeMedia(c.media_, &body, &ctype)
- if cancel != nil {
- defer cancel()
- }
- }
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- })
- if c.protocol_ == "resumable" {
- req.ContentLength = 0
- if c.mediaType_ == "" {
- c.mediaType_ = googleapi.DetectMediaType(c.resumable_)
- }
- req.Header.Set("X-Upload-Content-Type", c.mediaType_)
- req.Body = nil
- } else {
- req.Header.Set("Content-Type", ctype)
- }
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- if c.protocol_ == "resumable" {
- loc := res.Header.Get("Location")
- rx := &googleapi.ResumableUpload{
- Client: c.s.client,
- UserAgent: c.s.userAgent(),
- URI: loc,
- Media: c.resumable_,
- MediaType: c.mediaType_,
- ContentLength: c.resumable_.Size(),
- Callback: progressUpdater_,
- }
- res, err = rx.Upload(c.ctx_)
- if err != nil {
- return nil, err
- }
- defer res.Body.Close()
- }
- var ret *Job
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Starts a new asynchronous job. Requires the Can View project role.",
- // "httpMethod": "POST",
- // "id": "bigquery.jobs.insert",
- // "mediaUpload": {
- // "accept": [
- // "*/*"
- // ],
- // "protocols": {
- // "resumable": {
- // "multipart": true,
- // "path": "/resumable/upload/bigquery/v2/projects/{projectId}/jobs"
- // },
- // "simple": {
- // "multipart": true,
- // "path": "/upload/bigquery/v2/projects/{projectId}/jobs"
- // }
- // }
- // },
- // "parameterOrder": [
- // "projectId"
- // ],
- // "parameters": {
- // "projectId": {
- // "description": "Project ID of the project that will be billed for the job",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "projects/{projectId}/jobs",
- // "request": {
- // "$ref": "Job"
- // },
- // "response": {
- // "$ref": "Job"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ],
- // "supportsMediaUpload": true
- // }
-
-}
-
-// method id "bigquery.jobs.list":
-
-type JobsListCall struct {
- s *Service
- projectId string
- opt_ map[string]interface{}
-}
-
-// List: Lists all jobs that you started in the specified project. The
-// job list returns in reverse chronological order of when the jobs were
-// created, starting with the most recent job created. Requires the Can
-// View project role, or the Is Owner project role if you set the
-// allUsers property.
-func (r *JobsService) List(projectId string) *JobsListCall {
- c := &JobsListCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- return c
-}
-
-// AllUsers sets the optional parameter "allUsers": Whether to display
-// jobs owned by all users in the project. Default false
-func (c *JobsListCall) AllUsers(allUsers bool) *JobsListCall {
- c.opt_["allUsers"] = allUsers
- return c
-}
-
-// MaxResults sets the optional parameter "maxResults": Maximum number
-// of results to return
-func (c *JobsListCall) MaxResults(maxResults int64) *JobsListCall {
- c.opt_["maxResults"] = maxResults
- return c
-}
-
-// PageToken sets the optional parameter "pageToken": Page token,
-// returned by a previous call, to request the next page of results
-func (c *JobsListCall) PageToken(pageToken string) *JobsListCall {
- c.opt_["pageToken"] = pageToken
- return c
-}
-
-// Projection sets the optional parameter "projection": Restrict
-// information returned to a set of selected fields
-//
-// Possible values:
-// "full" - Includes all job data
-// "minimal" - Does not include the job configuration
-func (c *JobsListCall) Projection(projection string) *JobsListCall {
- c.opt_["projection"] = projection
- return c
-}
-
-// StateFilter sets the optional parameter "stateFilter": Filter for job
-// state
-//
-// Possible values:
-// "done" - Finished jobs
-// "pending" - Pending jobs
-// "running" - Running jobs
-func (c *JobsListCall) StateFilter(stateFilter string) *JobsListCall {
- c.opt_["stateFilter"] = stateFilter
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *JobsListCall) Fields(s ...googleapi.Field) *JobsListCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *JobsListCall) Do() (*JobList, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["allUsers"]; ok {
- params.Set("allUsers", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["maxResults"]; ok {
- params.Set("maxResults", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["pageToken"]; ok {
- params.Set("pageToken", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["projection"]; ok {
- params.Set("projection", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["stateFilter"]; ok {
- params.Set("stateFilter", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/jobs")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *JobList
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Lists all jobs that you started in the specified project. The job list returns in reverse chronological order of when the jobs were created, starting with the most recent job created. Requires the Can View project role, or the Is Owner project role if you set the allUsers property.",
- // "httpMethod": "GET",
- // "id": "bigquery.jobs.list",
- // "parameterOrder": [
- // "projectId"
- // ],
- // "parameters": {
- // "allUsers": {
- // "description": "Whether to display jobs owned by all users in the project. Default false",
- // "location": "query",
- // "type": "boolean"
- // },
- // "maxResults": {
- // "description": "Maximum number of results to return",
- // "format": "uint32",
- // "location": "query",
- // "type": "integer"
- // },
- // "pageToken": {
- // "description": "Page token, returned by a previous call, to request the next page of results",
- // "location": "query",
- // "type": "string"
- // },
- // "projectId": {
- // "description": "Project ID of the jobs to list",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "projection": {
- // "description": "Restrict information returned to a set of selected fields",
- // "enum": [
- // "full",
- // "minimal"
- // ],
- // "enumDescriptions": [
- // "Includes all job data",
- // "Does not include the job configuration"
- // ],
- // "location": "query",
- // "type": "string"
- // },
- // "stateFilter": {
- // "description": "Filter for job state",
- // "enum": [
- // "done",
- // "pending",
- // "running"
- // ],
- // "enumDescriptions": [
- // "Finished jobs",
- // "Pending jobs",
- // "Running jobs"
- // ],
- // "location": "query",
- // "repeated": true,
- // "type": "string"
- // }
- // },
- // "path": "projects/{projectId}/jobs",
- // "response": {
- // "$ref": "JobList"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "bigquery.jobs.query":
-
-type JobsQueryCall struct {
- s *Service
- projectId string
- queryrequest *QueryRequest
- opt_ map[string]interface{}
-}
-
-// Query: Runs a BigQuery SQL query synchronously and returns query
-// results if the query completes within a specified timeout.
-func (r *JobsService) Query(projectId string, queryrequest *QueryRequest) *JobsQueryCall {
- c := &JobsQueryCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.queryrequest = queryrequest
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *JobsQueryCall) Fields(s ...googleapi.Field) *JobsQueryCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *JobsQueryCall) Do() (*QueryResponse, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.queryrequest)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/queries")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *QueryResponse
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Runs a BigQuery SQL query synchronously and returns query results if the query completes within a specified timeout.",
- // "httpMethod": "POST",
- // "id": "bigquery.jobs.query",
- // "parameterOrder": [
- // "projectId"
- // ],
- // "parameters": {
- // "projectId": {
- // "description": "Project ID of the project billed for the query",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "projects/{projectId}/queries",
- // "request": {
- // "$ref": "QueryRequest"
- // },
- // "response": {
- // "$ref": "QueryResponse"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "bigquery.projects.list":
-
-type ProjectsListCall struct {
- s *Service
- opt_ map[string]interface{}
-}
-
-// List: Lists all projects to which you have been granted any project
-// role.
-func (r *ProjectsService) List() *ProjectsListCall {
- c := &ProjectsListCall{s: r.s, opt_: make(map[string]interface{})}
- return c
-}
-
-// MaxResults sets the optional parameter "maxResults": Maximum number
-// of results to return
-func (c *ProjectsListCall) MaxResults(maxResults int64) *ProjectsListCall {
- c.opt_["maxResults"] = maxResults
- return c
-}
-
-// PageToken sets the optional parameter "pageToken": Page token,
-// returned by a previous call, to request the next page of results
-func (c *ProjectsListCall) PageToken(pageToken string) *ProjectsListCall {
- c.opt_["pageToken"] = pageToken
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsListCall) Fields(s ...googleapi.Field) *ProjectsListCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsListCall) Do() (*ProjectList, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["maxResults"]; ok {
- params.Set("maxResults", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["pageToken"]; ok {
- params.Set("pageToken", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.SetOpaque(req.URL)
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *ProjectList
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Lists all projects to which you have been granted any project role.",
- // "httpMethod": "GET",
- // "id": "bigquery.projects.list",
- // "parameters": {
- // "maxResults": {
- // "description": "Maximum number of results to return",
- // "format": "uint32",
- // "location": "query",
- // "type": "integer"
- // },
- // "pageToken": {
- // "description": "Page token, returned by a previous call, to request the next page of results",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "projects",
- // "response": {
- // "$ref": "ProjectList"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "bigquery.tabledata.insertAll":
-
-type TabledataInsertAllCall struct {
- s *Service
- projectId string
- datasetId string
- tableId string
- tabledatainsertallrequest *TableDataInsertAllRequest
- opt_ map[string]interface{}
-}
-
-// InsertAll: Streams data into BigQuery one record at a time without
-// needing to run a load job. Requires the WRITER dataset role.
-func (r *TabledataService) InsertAll(projectId string, datasetId string, tableId string, tabledatainsertallrequest *TableDataInsertAllRequest) *TabledataInsertAllCall {
- c := &TabledataInsertAllCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.datasetId = datasetId
- c.tableId = tableId
- c.tabledatainsertallrequest = tabledatainsertallrequest
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *TabledataInsertAllCall) Fields(s ...googleapi.Field) *TabledataInsertAllCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *TabledataInsertAllCall) Do() (*TableDataInsertAllResponse, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.tabledatainsertallrequest)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "datasetId": c.datasetId,
- "tableId": c.tableId,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *TableDataInsertAllResponse
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Streams data into BigQuery one record at a time without needing to run a load job. Requires the WRITER dataset role.",
- // "httpMethod": "POST",
- // "id": "bigquery.tabledata.insertAll",
- // "parameterOrder": [
- // "projectId",
- // "datasetId",
- // "tableId"
- // ],
- // "parameters": {
- // "datasetId": {
- // "description": "Dataset ID of the destination table.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "projectId": {
- // "description": "Project ID of the destination table.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "tableId": {
- // "description": "Table ID of the destination table.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/insertAll",
- // "request": {
- // "$ref": "TableDataInsertAllRequest"
- // },
- // "response": {
- // "$ref": "TableDataInsertAllResponse"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/bigquery.insertdata",
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "bigquery.tabledata.list":
-
-type TabledataListCall struct {
- s *Service
- projectId string
- datasetId string
- tableId string
- opt_ map[string]interface{}
-}
-
-// List: Retrieves table data from a specified set of rows. Requires the
-// READER dataset role.
-func (r *TabledataService) List(projectId string, datasetId string, tableId string) *TabledataListCall {
- c := &TabledataListCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.datasetId = datasetId
- c.tableId = tableId
- return c
-}
-
-// MaxResults sets the optional parameter "maxResults": Maximum number
-// of results to return
-func (c *TabledataListCall) MaxResults(maxResults int64) *TabledataListCall {
- c.opt_["maxResults"] = maxResults
- return c
-}
-
-// PageToken sets the optional parameter "pageToken": Page token,
-// returned by a previous call, identifying the result set
-func (c *TabledataListCall) PageToken(pageToken string) *TabledataListCall {
- c.opt_["pageToken"] = pageToken
- return c
-}
-
-// StartIndex sets the optional parameter "startIndex": Zero-based index
-// of the starting row to read
-func (c *TabledataListCall) StartIndex(startIndex uint64) *TabledataListCall {
- c.opt_["startIndex"] = startIndex
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *TabledataListCall) Fields(s ...googleapi.Field) *TabledataListCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *TabledataListCall) Do() (*TableDataList, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["maxResults"]; ok {
- params.Set("maxResults", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["pageToken"]; ok {
- params.Set("pageToken", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["startIndex"]; ok {
- params.Set("startIndex", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/data")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "datasetId": c.datasetId,
- "tableId": c.tableId,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *TableDataList
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Retrieves table data from a specified set of rows. Requires the READER dataset role.",
- // "httpMethod": "GET",
- // "id": "bigquery.tabledata.list",
- // "parameterOrder": [
- // "projectId",
- // "datasetId",
- // "tableId"
- // ],
- // "parameters": {
- // "datasetId": {
- // "description": "Dataset ID of the table to read",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "maxResults": {
- // "description": "Maximum number of results to return",
- // "format": "uint32",
- // "location": "query",
- // "type": "integer"
- // },
- // "pageToken": {
- // "description": "Page token, returned by a previous call, identifying the result set",
- // "location": "query",
- // "type": "string"
- // },
- // "projectId": {
- // "description": "Project ID of the table to read",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "startIndex": {
- // "description": "Zero-based index of the starting row to read",
- // "format": "uint64",
- // "location": "query",
- // "type": "string"
- // },
- // "tableId": {
- // "description": "Table ID of the table to read",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}/data",
- // "response": {
- // "$ref": "TableDataList"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "bigquery.tables.delete":
-
-type TablesDeleteCall struct {
- s *Service
- projectId string
- datasetId string
- tableId string
- opt_ map[string]interface{}
-}
-
-// Delete: Deletes the table specified by tableId from the dataset. If
-// the table contains data, all the data will be deleted.
-func (r *TablesService) Delete(projectId string, datasetId string, tableId string) *TablesDeleteCall {
- c := &TablesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.datasetId = datasetId
- c.tableId = tableId
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *TablesDeleteCall) Fields(s ...googleapi.Field) *TablesDeleteCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *TablesDeleteCall) Do() error {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("DELETE", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "datasetId": c.datasetId,
- "tableId": c.tableId,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return err
- }
- return nil
- // {
- // "description": "Deletes the table specified by tableId from the dataset. If the table contains data, all the data will be deleted.",
- // "httpMethod": "DELETE",
- // "id": "bigquery.tables.delete",
- // "parameterOrder": [
- // "projectId",
- // "datasetId",
- // "tableId"
- // ],
- // "parameters": {
- // "datasetId": {
- // "description": "Dataset ID of the table to delete",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "projectId": {
- // "description": "Project ID of the table to delete",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "tableId": {
- // "description": "Table ID of the table to delete",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "bigquery.tables.get":
-
-type TablesGetCall struct {
- s *Service
- projectId string
- datasetId string
- tableId string
- opt_ map[string]interface{}
-}
-
-// Get: Gets the specified table resource by table ID. This method does
-// not return the data in the table, it only returns the table resource,
-// which describes the structure of this table.
-func (r *TablesService) Get(projectId string, datasetId string, tableId string) *TablesGetCall {
- c := &TablesGetCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.datasetId = datasetId
- c.tableId = tableId
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *TablesGetCall) Fields(s ...googleapi.Field) *TablesGetCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *TablesGetCall) Do() (*Table, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "datasetId": c.datasetId,
- "tableId": c.tableId,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Table
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Gets the specified table resource by table ID. This method does not return the data in the table, it only returns the table resource, which describes the structure of this table.",
- // "httpMethod": "GET",
- // "id": "bigquery.tables.get",
- // "parameterOrder": [
- // "projectId",
- // "datasetId",
- // "tableId"
- // ],
- // "parameters": {
- // "datasetId": {
- // "description": "Dataset ID of the requested table",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "projectId": {
- // "description": "Project ID of the requested table",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "tableId": {
- // "description": "Table ID of the requested table",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
- // "response": {
- // "$ref": "Table"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "bigquery.tables.insert":
-
-type TablesInsertCall struct {
- s *Service
- projectId string
- datasetId string
- table *Table
- opt_ map[string]interface{}
-}
-
-// Insert: Creates a new, empty table in the dataset.
-func (r *TablesService) Insert(projectId string, datasetId string, table *Table) *TablesInsertCall {
- c := &TablesInsertCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.datasetId = datasetId
- c.table = table
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *TablesInsertCall) Fields(s ...googleapi.Field) *TablesInsertCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *TablesInsertCall) Do() (*Table, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.table)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "datasetId": c.datasetId,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Table
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Creates a new, empty table in the dataset.",
- // "httpMethod": "POST",
- // "id": "bigquery.tables.insert",
- // "parameterOrder": [
- // "projectId",
- // "datasetId"
- // ],
- // "parameters": {
- // "datasetId": {
- // "description": "Dataset ID of the new table",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "projectId": {
- // "description": "Project ID of the new table",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "projects/{projectId}/datasets/{datasetId}/tables",
- // "request": {
- // "$ref": "Table"
- // },
- // "response": {
- // "$ref": "Table"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "bigquery.tables.list":
-
-type TablesListCall struct {
- s *Service
- projectId string
- datasetId string
- opt_ map[string]interface{}
-}
-
-// List: Lists all tables in the specified dataset. Requires the READER
-// dataset role.
-func (r *TablesService) List(projectId string, datasetId string) *TablesListCall {
- c := &TablesListCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.datasetId = datasetId
- return c
-}
-
-// MaxResults sets the optional parameter "maxResults": Maximum number
-// of results to return
-func (c *TablesListCall) MaxResults(maxResults int64) *TablesListCall {
- c.opt_["maxResults"] = maxResults
- return c
-}
-
-// PageToken sets the optional parameter "pageToken": Page token,
-// returned by a previous call, to request the next page of results
-func (c *TablesListCall) PageToken(pageToken string) *TablesListCall {
- c.opt_["pageToken"] = pageToken
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *TablesListCall) Fields(s ...googleapi.Field) *TablesListCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *TablesListCall) Do() (*TableList, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["maxResults"]; ok {
- params.Set("maxResults", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["pageToken"]; ok {
- params.Set("pageToken", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "datasetId": c.datasetId,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *TableList
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Lists all tables in the specified dataset. Requires the READER dataset role.",
- // "httpMethod": "GET",
- // "id": "bigquery.tables.list",
- // "parameterOrder": [
- // "projectId",
- // "datasetId"
- // ],
- // "parameters": {
- // "datasetId": {
- // "description": "Dataset ID of the tables to list",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "maxResults": {
- // "description": "Maximum number of results to return",
- // "format": "uint32",
- // "location": "query",
- // "type": "integer"
- // },
- // "pageToken": {
- // "description": "Page token, returned by a previous call, to request the next page of results",
- // "location": "query",
- // "type": "string"
- // },
- // "projectId": {
- // "description": "Project ID of the tables to list",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "projects/{projectId}/datasets/{datasetId}/tables",
- // "response": {
- // "$ref": "TableList"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "bigquery.tables.patch":
-
-type TablesPatchCall struct {
- s *Service
- projectId string
- datasetId string
- tableId string
- table *Table
- opt_ map[string]interface{}
-}
-
-// Patch: Updates information in an existing table. The update method
-// replaces the entire table resource, whereas the patch method only
-// replaces fields that are provided in the submitted table resource.
-// This method supports patch semantics.
-func (r *TablesService) Patch(projectId string, datasetId string, tableId string, table *Table) *TablesPatchCall {
- c := &TablesPatchCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.datasetId = datasetId
- c.tableId = tableId
- c.table = table
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *TablesPatchCall) Fields(s ...googleapi.Field) *TablesPatchCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *TablesPatchCall) Do() (*Table, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.table)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("PATCH", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "datasetId": c.datasetId,
- "tableId": c.tableId,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Table
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource. This method supports patch semantics.",
- // "httpMethod": "PATCH",
- // "id": "bigquery.tables.patch",
- // "parameterOrder": [
- // "projectId",
- // "datasetId",
- // "tableId"
- // ],
- // "parameters": {
- // "datasetId": {
- // "description": "Dataset ID of the table to update",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "projectId": {
- // "description": "Project ID of the table to update",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "tableId": {
- // "description": "Table ID of the table to update",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
- // "request": {
- // "$ref": "Table"
- // },
- // "response": {
- // "$ref": "Table"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "bigquery.tables.update":
-
-type TablesUpdateCall struct {
- s *Service
- projectId string
- datasetId string
- tableId string
- table *Table
- opt_ map[string]interface{}
-}
-
-// Update: Updates information in an existing table. The update method
-// replaces the entire table resource, whereas the patch method only
-// replaces fields that are provided in the submitted table resource.
-func (r *TablesService) Update(projectId string, datasetId string, tableId string, table *Table) *TablesUpdateCall {
- c := &TablesUpdateCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.datasetId = datasetId
- c.tableId = tableId
- c.table = table
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *TablesUpdateCall) Fields(s ...googleapi.Field) *TablesUpdateCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *TablesUpdateCall) Do() (*Table, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.table)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/datasets/{datasetId}/tables/{tableId}")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("PUT", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "datasetId": c.datasetId,
- "tableId": c.tableId,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Table
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Updates information in an existing table. The update method replaces the entire table resource, whereas the patch method only replaces fields that are provided in the submitted table resource.",
- // "httpMethod": "PUT",
- // "id": "bigquery.tables.update",
- // "parameterOrder": [
- // "projectId",
- // "datasetId",
- // "tableId"
- // ],
- // "parameters": {
- // "datasetId": {
- // "description": "Dataset ID of the table to update",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "projectId": {
- // "description": "Project ID of the table to update",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "tableId": {
- // "description": "Table ID of the table to update",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "projects/{projectId}/datasets/{datasetId}/tables/{tableId}",
- // "request": {
- // "$ref": "Table"
- // },
- // "response": {
- // "$ref": "Table"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/bigquery",
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
diff --git a/vendor/google.golang.org/api/container/v1beta1/container-api.json b/vendor/google.golang.org/api/container/v1beta1/container-api.json
deleted file mode 100644
index 18ef43d..0000000
--- a/vendor/google.golang.org/api/container/v1beta1/container-api.json
+++ /dev/null
@@ -1,621 +0,0 @@
-{
- "kind": "discovery#restDescription",
- "etag": "\"ye6orv2F-1npMW3u9suM3a7C5Bo/ReRXGEgk9TcyLgT1qFhzuzuEb7E\"",
- "discoveryVersion": "v1",
- "id": "container:v1beta1",
- "name": "container",
- "version": "v1beta1",
- "revision": "20150504",
- "title": "Google Container Engine API",
- "description": "The Google Container Engine API is used for building and managing container based applications, powered by the open source Kubernetes technology.",
- "ownerDomain": "google.com",
- "ownerName": "Google",
- "icons": {
- "x16": "http://www.google.com/images/icons/product/search-16.gif",
- "x32": "http://www.google.com/images/icons/product/search-32.gif"
- },
- "documentationLink": "https://cloud.google.com/container-engine/docs/v1beta1/",
- "protocol": "rest",
- "baseUrl": "https://www.googleapis.com/container/v1beta1/projects/",
- "basePath": "/container/v1beta1/projects/",
- "rootUrl": "https://www.googleapis.com/",
- "servicePath": "container/v1beta1/projects/",
- "batchPath": "batch",
- "parameters": {
- "alt": {
- "type": "string",
- "description": "Data format for the response.",
- "default": "json",
- "enum": [
- "json"
- ],
- "enumDescriptions": [
- "Responses with Content-Type of application/json"
- ],
- "location": "query"
- },
- "fields": {
- "type": "string",
- "description": "Selector specifying which fields to include in a partial response.",
- "location": "query"
- },
- "key": {
- "type": "string",
- "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
- "location": "query"
- },
- "oauth_token": {
- "type": "string",
- "description": "OAuth 2.0 token for the current user.",
- "location": "query"
- },
- "prettyPrint": {
- "type": "boolean",
- "description": "Returns response with indentations and line breaks.",
- "default": "true",
- "location": "query"
- },
- "quotaUser": {
- "type": "string",
- "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.",
- "location": "query"
- },
- "userIp": {
- "type": "string",
- "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.",
- "location": "query"
- }
- },
- "auth": {
- "oauth2": {
- "scopes": {
- "https://www.googleapis.com/auth/cloud-platform": {
- "description": "View and manage your data across Google Cloud Platform services"
- }
- }
- }
- },
- "schemas": {
- "Cluster": {
- "id": "Cluster",
- "type": "object",
- "properties": {
- "clusterApiVersion": {
- "type": "string",
- "description": "The API version of the Kubernetes master and kubelets running in this cluster. Leave blank to pick up the latest stable release, or specify a version of the form \"x.y.z\". The Google Container Engine release notes lists the currently supported versions. If an incorrect version is specified, the server returns an error listing the currently supported versions."
- },
- "containerIpv4Cidr": {
- "type": "string",
- "description": "The IP address range of the container pods in this cluster, in CIDR notation (e.g. 10.96.0.0/14). Leave blank to have one automatically chosen or specify a /14 block in 10.0.0.0/8 or 172.16.0.0/12."
- },
- "creationTimestamp": {
- "type": "string",
- "description": "[Output only] The time the cluster was created, in RFC3339 text format."
- },
- "description": {
- "type": "string",
- "description": "An optional description of this cluster."
- },
- "enableCloudLogging": {
- "type": "boolean",
- "description": "Whether logs from the cluster should be made available via the Google Cloud Logging service. This includes both logs from your applications running in the cluster as well as logs from the Kubernetes components themselves."
- },
- "endpoint": {
- "type": "string",
- "description": "[Output only] The IP address of this cluster's Kubernetes master. The endpoint can be accessed from the internet at https://username:password@endpoint/.\n\nSee the masterAuth property of this resource for username and password information."
- },
- "masterAuth": {
- "$ref": "MasterAuth",
- "description": "The authentication information for accessing the master."
- },
- "name": {
- "type": "string",
- "description": "The name of this cluster. The name must be unique within this project and zone, and can be up to 40 characters with the following restrictions: \n- Lowercase letters, numbers, and hyphens only.\n- Must start with a letter.\n- Must end with a number or a letter."
- },
- "network": {
- "type": "string",
- "description": "The name of the Google Compute Engine network to which the cluster is connected."
- },
- "nodeConfig": {
- "$ref": "NodeConfig",
- "description": "The machine type and image to use for all nodes in this cluster. See the descriptions of the child properties of nodeConfig."
- },
- "nodeRoutingPrefixSize": {
- "type": "integer",
- "description": "[Output only] The size of the address space on each node for hosting containers.",
- "format": "int32"
- },
- "numNodes": {
- "type": "integer",
- "description": "The number of nodes to create in this cluster. You must ensure that your Compute Engine resource quota is sufficient for this number of instances plus one (to include the master). You must also have available firewall and routes quota.",
- "format": "int32"
- },
- "selfLink": {
- "type": "string",
- "description": "[Output only] Server-defined URL for the resource."
- },
- "servicesIpv4Cidr": {
- "type": "string",
- "description": "[Output only] The IP address range of the Kubernetes services in this cluster, in CIDR notation (e.g. 1.2.3.4/29). Service addresses are typically put in the last /16 from the container CIDR."
- },
- "status": {
- "type": "string",
- "description": "[Output only] The current status of this cluster.",
- "enum": [
- "error",
- "provisioning",
- "running",
- "stopping"
- ],
- "enumDescriptions": [
- "",
- "",
- "",
- ""
- ]
- },
- "statusMessage": {
- "type": "string",
- "description": "[Output only] Additional information about the current status of this cluster, if available."
- },
- "zone": {
- "type": "string",
- "description": "[Output only] The name of the Google Compute Engine zone in which the cluster resides."
- }
- }
- },
- "CreateClusterRequest": {
- "id": "CreateClusterRequest",
- "type": "object",
- "properties": {
- "cluster": {
- "$ref": "Cluster",
- "description": "A cluster resource."
- }
- }
- },
- "ListAggregatedClustersResponse": {
- "id": "ListAggregatedClustersResponse",
- "type": "object",
- "properties": {
- "clusters": {
- "type": "array",
- "description": "A list of clusters in the project, across all zones.",
- "items": {
- "$ref": "Cluster"
- }
- }
- }
- },
- "ListAggregatedOperationsResponse": {
- "id": "ListAggregatedOperationsResponse",
- "type": "object",
- "properties": {
- "operations": {
- "type": "array",
- "description": "A list of operations in the project, across all zones.",
- "items": {
- "$ref": "Operation"
- }
- }
- }
- },
- "ListClustersResponse": {
- "id": "ListClustersResponse",
- "type": "object",
- "properties": {
- "clusters": {
- "type": "array",
- "description": "A list of clusters in the project in the specified zone.",
- "items": {
- "$ref": "Cluster"
- }
- }
- }
- },
- "ListOperationsResponse": {
- "id": "ListOperationsResponse",
- "type": "object",
- "properties": {
- "operations": {
- "type": "array",
- "description": "A list of operations in the project in the specified zone.",
- "items": {
- "$ref": "Operation"
- }
- }
- }
- },
- "MasterAuth": {
- "id": "MasterAuth",
- "type": "object",
- "description": "The authentication information for accessing the master. Authentication is either done using HTTP basic authentication or using a bearer token.",
- "properties": {
- "bearerToken": {
- "type": "string",
- "description": "The token used to authenticate API requests to the master. The token is to be included in an HTTP Authorization Header in all requests to the master endpoint. The format of the header is: \"Authorization: Bearer \"."
- },
- "password": {
- "type": "string",
- "description": "The password to use for HTTP basic authentication when accessing the Kubernetes master endpoint. Because the master endpoint is open to the internet, you should create a strong password."
- },
- "user": {
- "type": "string",
- "description": "The username to use for HTTP basic authentication when accessing the Kubernetes master endpoint."
- }
- }
- },
- "NodeConfig": {
- "id": "NodeConfig",
- "type": "object",
- "properties": {
- "machineType": {
- "type": "string",
- "description": "The name of a Google Compute Engine machine type (e.g. n1-standard-1).\n\nIf unspecified, the default machine type is n1-standard-1."
- },
- "serviceAccounts": {
- "type": "array",
- "description": "The optional list of ServiceAccounts, each with their specified scopes, to be made available on all of the node VMs. In addition to the service accounts and scopes specified, the \"default\" account will always be created with the following scopes to ensure the correct functioning of the cluster: \n- https://www.googleapis.com/auth/compute,\n- https://www.googleapis.com/auth/devstorage.read_only",
- "items": {
- "$ref": "ServiceAccount"
- }
- },
- "sourceImage": {
- "type": "string",
- "description": "The fully-specified name of a Google Compute Engine image. For example: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/backports-debian-7-wheezy-vYYYYMMDD (where YYYMMDD is the version date).\n\nIf specifying an image, you are responsible for ensuring its compatibility with the Debian 7 backports image. We recommend leaving this field blank to accept the default backports-debian-7-wheezy value."
- }
- }
- },
- "Operation": {
- "id": "Operation",
- "type": "object",
- "description": "Defines the operation resource. All fields are output only.",
- "properties": {
- "errorMessage": {
- "type": "string",
- "description": "If an error has occurred, a textual description of the error."
- },
- "name": {
- "type": "string",
- "description": "The server-assigned ID for the operation."
- },
- "operationType": {
- "type": "string",
- "description": "The operation type.",
- "enum": [
- "createCluster",
- "deleteCluster"
- ],
- "enumDescriptions": [
- "",
- ""
- ]
- },
- "selfLink": {
- "type": "string",
- "description": "Server-defined URL for the resource."
- },
- "status": {
- "type": "string",
- "description": "The current status of the operation.",
- "enum": [
- "done",
- "pending",
- "running"
- ],
- "enumDescriptions": [
- "",
- "",
- ""
- ]
- },
- "target": {
- "type": "string",
- "description": "[Optional] The URL of the cluster resource that this operation is associated with."
- },
- "targetLink": {
- "type": "string",
- "description": "Server-defined URL for the target of the operation."
- },
- "zone": {
- "type": "string",
- "description": "The name of the Google Compute Engine zone in which the operation is taking place."
- }
- }
- },
- "ServiceAccount": {
- "id": "ServiceAccount",
- "type": "object",
- "description": "A Compute Engine service account.",
- "properties": {
- "email": {
- "type": "string",
- "description": "Email address of the service account."
- },
- "scopes": {
- "type": "array",
- "description": "The list of scopes to be made available for this service account.",
- "items": {
- "type": "string"
- }
- }
- }
- }
- },
- "resources": {
- "projects": {
- "resources": {
- "clusters": {
- "methods": {
- "list": {
- "id": "container.projects.clusters.list",
- "path": "{projectId}/clusters",
- "httpMethod": "GET",
- "description": "Lists all clusters owned by a project across all zones.",
- "parameters": {
- "projectId": {
- "type": "string",
- "description": "The Google Developers Console project ID or project number.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId"
- ],
- "response": {
- "$ref": "ListAggregatedClustersResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- }
- }
- },
- "operations": {
- "methods": {
- "list": {
- "id": "container.projects.operations.list",
- "path": "{projectId}/operations",
- "httpMethod": "GET",
- "description": "Lists all operations in a project, across all zones.",
- "parameters": {
- "projectId": {
- "type": "string",
- "description": "The Google Developers Console project ID or project number.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId"
- ],
- "response": {
- "$ref": "ListAggregatedOperationsResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- }
- }
- },
- "zones": {
- "resources": {
- "clusters": {
- "methods": {
- "create": {
- "id": "container.projects.zones.clusters.create",
- "path": "{projectId}/zones/{zoneId}/clusters",
- "httpMethod": "POST",
- "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances, plus a Kubernetes master instance.\n\nThe cluster is created in the project's default network.\n\nA firewall is added that allows traffic into port 443 on the master, which enables HTTPS. A firewall and a route is added for each node to allow the containers on that node to communicate with all other instances in the cluster.\n\nFinally, an entry is added to the project's global metadata indicating which CIDR range is being used by the cluster.",
- "parameters": {
- "projectId": {
- "type": "string",
- "description": "The Google Developers Console project ID or project number.",
- "required": true,
- "location": "path"
- },
- "zoneId": {
- "type": "string",
- "description": "The name of the Google Compute Engine zone in which the cluster resides.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId",
- "zoneId"
- ],
- "request": {
- "$ref": "CreateClusterRequest"
- },
- "response": {
- "$ref": "Operation"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- },
- "delete": {
- "id": "container.projects.zones.clusters.delete",
- "path": "{projectId}/zones/{zoneId}/clusters/{clusterId}",
- "httpMethod": "DELETE",
- "description": "Deletes the cluster, including the Kubernetes master and all worker nodes.\n\nFirewalls and routes that were configured at cluster creation are also deleted.",
- "parameters": {
- "clusterId": {
- "type": "string",
- "description": "The name of the cluster to delete.",
- "required": true,
- "location": "path"
- },
- "projectId": {
- "type": "string",
- "description": "The Google Developers Console project ID or project number.",
- "required": true,
- "location": "path"
- },
- "zoneId": {
- "type": "string",
- "description": "The name of the Google Compute Engine zone in which the cluster resides.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId",
- "zoneId",
- "clusterId"
- ],
- "response": {
- "$ref": "Operation"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- },
- "get": {
- "id": "container.projects.zones.clusters.get",
- "path": "{projectId}/zones/{zoneId}/clusters/{clusterId}",
- "httpMethod": "GET",
- "description": "Gets a specific cluster.",
- "parameters": {
- "clusterId": {
- "type": "string",
- "description": "The name of the cluster to retrieve.",
- "required": true,
- "location": "path"
- },
- "projectId": {
- "type": "string",
- "description": "The Google Developers Console project ID or project number.",
- "required": true,
- "location": "path"
- },
- "zoneId": {
- "type": "string",
- "description": "The name of the Google Compute Engine zone in which the cluster resides.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId",
- "zoneId",
- "clusterId"
- ],
- "response": {
- "$ref": "Cluster"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- },
- "list": {
- "id": "container.projects.zones.clusters.list",
- "path": "{projectId}/zones/{zoneId}/clusters",
- "httpMethod": "GET",
- "description": "Lists all clusters owned by a project in the specified zone.",
- "parameters": {
- "projectId": {
- "type": "string",
- "description": "The Google Developers Console project ID or project number.",
- "required": true,
- "location": "path"
- },
- "zoneId": {
- "type": "string",
- "description": "The name of the Google Compute Engine zone in which the cluster resides.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId",
- "zoneId"
- ],
- "response": {
- "$ref": "ListClustersResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- }
- }
- },
- "operations": {
- "methods": {
- "get": {
- "id": "container.projects.zones.operations.get",
- "path": "{projectId}/zones/{zoneId}/operations/{operationId}",
- "httpMethod": "GET",
- "description": "Gets the specified operation.",
- "parameters": {
- "operationId": {
- "type": "string",
- "description": "The server-assigned name of the operation.",
- "required": true,
- "location": "path"
- },
- "projectId": {
- "type": "string",
- "description": "The Google Developers Console project ID or project number.",
- "required": true,
- "location": "path"
- },
- "zoneId": {
- "type": "string",
- "description": "The name of the Google Compute Engine zone in which the operation resides. This is always the same zone as the cluster with which the operation is associated.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId",
- "zoneId",
- "operationId"
- ],
- "response": {
- "$ref": "Operation"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- },
- "list": {
- "id": "container.projects.zones.operations.list",
- "path": "{projectId}/zones/{zoneId}/operations",
- "httpMethod": "GET",
- "description": "Lists all operations in a project in a specific zone.",
- "parameters": {
- "projectId": {
- "type": "string",
- "description": "The Google Developers Console project ID or project number.",
- "required": true,
- "location": "path"
- },
- "zoneId": {
- "type": "string",
- "description": "The name of the Google Compute Engine zone to return operations for.",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "projectId",
- "zoneId"
- ],
- "response": {
- "$ref": "ListOperationsResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- }
- }
- }
- }
- }
- }
- }
- }
-}
diff --git a/vendor/google.golang.org/api/container/v1beta1/container-gen.go b/vendor/google.golang.org/api/container/v1beta1/container-gen.go
deleted file mode 100644
index 17ea4cc..0000000
--- a/vendor/google.golang.org/api/container/v1beta1/container-gen.go
+++ /dev/null
@@ -1,1077 +0,0 @@
-// Package container provides access to the Google Container Engine API.
-//
-// See https://cloud.google.com/container-engine/docs/v1beta1/
-//
-// Usage example:
-//
-// import "google.golang.org/api/container/v1beta1"
-// ...
-// containerService, err := container.New(oauthHttpClient)
-package container
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "golang.org/x/net/context"
- "google.golang.org/api/googleapi"
- "io"
- "net/http"
- "net/url"
- "strconv"
- "strings"
-)
-
-// Always reference these packages, just in case the auto-generated code
-// below doesn't.
-var _ = bytes.NewBuffer
-var _ = strconv.Itoa
-var _ = fmt.Sprintf
-var _ = json.NewDecoder
-var _ = io.Copy
-var _ = url.Parse
-var _ = googleapi.Version
-var _ = errors.New
-var _ = strings.Replace
-var _ = context.Background
-
-const apiId = "container:v1beta1"
-const apiName = "container"
-const apiVersion = "v1beta1"
-const basePath = "https://www.googleapis.com/container/v1beta1/projects/"
-
-// OAuth2 scopes used by this API.
-const (
- // View and manage your data across Google Cloud Platform services
- CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
-)
-
-func New(client *http.Client) (*Service, error) {
- if client == nil {
- return nil, errors.New("client is nil")
- }
- s := &Service{client: client, BasePath: basePath}
- s.Projects = NewProjectsService(s)
- return s, nil
-}
-
-type Service struct {
- client *http.Client
- BasePath string // API endpoint base URL
- UserAgent string // optional additional User-Agent fragment
-
- Projects *ProjectsService
-}
-
-func (s *Service) userAgent() string {
- if s.UserAgent == "" {
- return googleapi.UserAgent
- }
- return googleapi.UserAgent + " " + s.UserAgent
-}
-
-func NewProjectsService(s *Service) *ProjectsService {
- rs := &ProjectsService{s: s}
- rs.Clusters = NewProjectsClustersService(s)
- rs.Operations = NewProjectsOperationsService(s)
- rs.Zones = NewProjectsZonesService(s)
- return rs
-}
-
-type ProjectsService struct {
- s *Service
-
- Clusters *ProjectsClustersService
-
- Operations *ProjectsOperationsService
-
- Zones *ProjectsZonesService
-}
-
-func NewProjectsClustersService(s *Service) *ProjectsClustersService {
- rs := &ProjectsClustersService{s: s}
- return rs
-}
-
-type ProjectsClustersService struct {
- s *Service
-}
-
-func NewProjectsOperationsService(s *Service) *ProjectsOperationsService {
- rs := &ProjectsOperationsService{s: s}
- return rs
-}
-
-type ProjectsOperationsService struct {
- s *Service
-}
-
-func NewProjectsZonesService(s *Service) *ProjectsZonesService {
- rs := &ProjectsZonesService{s: s}
- rs.Clusters = NewProjectsZonesClustersService(s)
- rs.Operations = NewProjectsZonesOperationsService(s)
- return rs
-}
-
-type ProjectsZonesService struct {
- s *Service
-
- Clusters *ProjectsZonesClustersService
-
- Operations *ProjectsZonesOperationsService
-}
-
-func NewProjectsZonesClustersService(s *Service) *ProjectsZonesClustersService {
- rs := &ProjectsZonesClustersService{s: s}
- return rs
-}
-
-type ProjectsZonesClustersService struct {
- s *Service
-}
-
-func NewProjectsZonesOperationsService(s *Service) *ProjectsZonesOperationsService {
- rs := &ProjectsZonesOperationsService{s: s}
- return rs
-}
-
-type ProjectsZonesOperationsService struct {
- s *Service
-}
-
-type Cluster struct {
- // ClusterApiVersion: The API version of the Kubernetes master and
- // kubelets running in this cluster. Leave blank to pick up the latest
- // stable release, or specify a version of the form "x.y.z". The Google
- // Container Engine release notes lists the currently supported
- // versions. If an incorrect version is specified, the server returns an
- // error listing the currently supported versions.
- ClusterApiVersion string `json:"clusterApiVersion,omitempty"`
-
- // ContainerIpv4Cidr: The IP address range of the container pods in this
- // cluster, in CIDR notation (e.g. 10.96.0.0/14). Leave blank to have
- // one automatically chosen or specify a /14 block in 10.0.0.0/8 or
- // 172.16.0.0/12.
- ContainerIpv4Cidr string `json:"containerIpv4Cidr,omitempty"`
-
- // CreationTimestamp: [Output only] The time the cluster was created, in
- // RFC3339 text format.
- CreationTimestamp string `json:"creationTimestamp,omitempty"`
-
- // Description: An optional description of this cluster.
- Description string `json:"description,omitempty"`
-
- // EnableCloudLogging: Whether logs from the cluster should be made
- // available via the Google Cloud Logging service. This includes both
- // logs from your applications running in the cluster as well as logs
- // from the Kubernetes components themselves.
- EnableCloudLogging bool `json:"enableCloudLogging,omitempty"`
-
- // Endpoint: [Output only] The IP address of this cluster's Kubernetes
- // master. The endpoint can be accessed from the internet at
- // https://username:password@endpoint/.
- //
- // See the masterAuth property of this resource for username and
- // password information.
- Endpoint string `json:"endpoint,omitempty"`
-
- // MasterAuth: The authentication information for accessing the master.
- MasterAuth *MasterAuth `json:"masterAuth,omitempty"`
-
- // Name: The name of this cluster. The name must be unique within this
- // project and zone, and can be up to 40 characters with the following
- // restrictions:
- // - Lowercase letters, numbers, and hyphens only.
- // - Must start with a letter.
- // - Must end with a number or a letter.
- Name string `json:"name,omitempty"`
-
- // Network: The name of the Google Compute Engine network to which the
- // cluster is connected.
- Network string `json:"network,omitempty"`
-
- // NodeConfig: The machine type and image to use for all nodes in this
- // cluster. See the descriptions of the child properties of nodeConfig.
- NodeConfig *NodeConfig `json:"nodeConfig,omitempty"`
-
- // NodeRoutingPrefixSize: [Output only] The size of the address space on
- // each node for hosting containers.
- NodeRoutingPrefixSize int64 `json:"nodeRoutingPrefixSize,omitempty"`
-
- // NumNodes: The number of nodes to create in this cluster. You must
- // ensure that your Compute Engine resource quota is sufficient for this
- // number of instances plus one (to include the master). You must also
- // have available firewall and routes quota.
- NumNodes int64 `json:"numNodes,omitempty"`
-
- // SelfLink: [Output only] Server-defined URL for the resource.
- SelfLink string `json:"selfLink,omitempty"`
-
- // ServicesIpv4Cidr: [Output only] The IP address range of the
- // Kubernetes services in this cluster, in CIDR notation (e.g.
- // 1.2.3.4/29). Service addresses are typically put in the last /16 from
- // the container CIDR.
- ServicesIpv4Cidr string `json:"servicesIpv4Cidr,omitempty"`
-
- // Status: [Output only] The current status of this cluster.
- //
- // Possible values:
- // "error"
- // "provisioning"
- // "running"
- // "stopping"
- Status string `json:"status,omitempty"`
-
- // StatusMessage: [Output only] Additional information about the current
- // status of this cluster, if available.
- StatusMessage string `json:"statusMessage,omitempty"`
-
- // Zone: [Output only] The name of the Google Compute Engine zone in
- // which the cluster resides.
- Zone string `json:"zone,omitempty"`
-}
-
-type CreateClusterRequest struct {
- // Cluster: A cluster resource.
- Cluster *Cluster `json:"cluster,omitempty"`
-}
-
-type ListAggregatedClustersResponse struct {
- // Clusters: A list of clusters in the project, across all zones.
- Clusters []*Cluster `json:"clusters,omitempty"`
-}
-
-type ListAggregatedOperationsResponse struct {
- // Operations: A list of operations in the project, across all zones.
- Operations []*Operation `json:"operations,omitempty"`
-}
-
-type ListClustersResponse struct {
- // Clusters: A list of clusters in the project in the specified zone.
- Clusters []*Cluster `json:"clusters,omitempty"`
-}
-
-type ListOperationsResponse struct {
- // Operations: A list of operations in the project in the specified
- // zone.
- Operations []*Operation `json:"operations,omitempty"`
-}
-
-type MasterAuth struct {
- // BearerToken: The token used to authenticate API requests to the
- // master. The token is to be included in an HTTP Authorization Header
- // in all requests to the master endpoint. The format of the header is:
- // "Authorization: Bearer ".
- BearerToken string `json:"bearerToken,omitempty"`
-
- // Password: The password to use for HTTP basic authentication when
- // accessing the Kubernetes master endpoint. Because the master endpoint
- // is open to the internet, you should create a strong password.
- Password string `json:"password,omitempty"`
-
- // User: The username to use for HTTP basic authentication when
- // accessing the Kubernetes master endpoint.
- User string `json:"user,omitempty"`
-}
-
-type NodeConfig struct {
- // MachineType: The name of a Google Compute Engine machine type (e.g.
- // n1-standard-1).
- //
- // If unspecified, the default machine type is n1-standard-1.
- MachineType string `json:"machineType,omitempty"`
-
- // ServiceAccounts: The optional list of ServiceAccounts, each with
- // their specified scopes, to be made available on all of the node VMs.
- // In addition to the service accounts and scopes specified, the
- // "default" account will always be created with the following scopes to
- // ensure the correct functioning of the cluster:
- // - https://www.googleapis.com/auth/compute,
- // - https://www.googleapis.com/auth/devstorage.read_only
- ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"`
-
- // SourceImage: The fully-specified name of a Google Compute Engine
- // image. For example:
- // https://www.googleapis.com/compute/v1/projects/debian-cloud/global/ima
- // ges/backports-debian-7-wheezy-vYYYYMMDD (where YYYMMDD is the version
- // date).
- //
- // If specifying an image, you are responsible for ensuring its
- // compatibility with the Debian 7 backports image. We recommend leaving
- // this field blank to accept the default backports-debian-7-wheezy
- // value.
- SourceImage string `json:"sourceImage,omitempty"`
-}
-
-type Operation struct {
- // ErrorMessage: If an error has occurred, a textual description of the
- // error.
- ErrorMessage string `json:"errorMessage,omitempty"`
-
- // Name: The server-assigned ID for the operation.
- Name string `json:"name,omitempty"`
-
- // OperationType: The operation type.
- //
- // Possible values:
- // "createCluster"
- // "deleteCluster"
- OperationType string `json:"operationType,omitempty"`
-
- // SelfLink: Server-defined URL for the resource.
- SelfLink string `json:"selfLink,omitempty"`
-
- // Status: The current status of the operation.
- //
- // Possible values:
- // "done"
- // "pending"
- // "running"
- Status string `json:"status,omitempty"`
-
- // Target: [Optional] The URL of the cluster resource that this
- // operation is associated with.
- Target string `json:"target,omitempty"`
-
- // TargetLink: Server-defined URL for the target of the operation.
- TargetLink string `json:"targetLink,omitempty"`
-
- // Zone: The name of the Google Compute Engine zone in which the
- // operation is taking place.
- Zone string `json:"zone,omitempty"`
-}
-
-type ServiceAccount struct {
- // Email: Email address of the service account.
- Email string `json:"email,omitempty"`
-
- // Scopes: The list of scopes to be made available for this service
- // account.
- Scopes []string `json:"scopes,omitempty"`
-}
-
-// method id "container.projects.clusters.list":
-
-type ProjectsClustersListCall struct {
- s *Service
- projectId string
- opt_ map[string]interface{}
-}
-
-// List: Lists all clusters owned by a project across all zones.
-func (r *ProjectsClustersService) List(projectId string) *ProjectsClustersListCall {
- c := &ProjectsClustersListCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsClustersListCall) Fields(s ...googleapi.Field) *ProjectsClustersListCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsClustersListCall) Do() (*ListAggregatedClustersResponse, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/clusters")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *ListAggregatedClustersResponse
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Lists all clusters owned by a project across all zones.",
- // "httpMethod": "GET",
- // "id": "container.projects.clusters.list",
- // "parameterOrder": [
- // "projectId"
- // ],
- // "parameters": {
- // "projectId": {
- // "description": "The Google Developers Console project ID or project number.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{projectId}/clusters",
- // "response": {
- // "$ref": "ListAggregatedClustersResponse"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "container.projects.operations.list":
-
-type ProjectsOperationsListCall struct {
- s *Service
- projectId string
- opt_ map[string]interface{}
-}
-
-// List: Lists all operations in a project, across all zones.
-func (r *ProjectsOperationsService) List(projectId string) *ProjectsOperationsListCall {
- c := &ProjectsOperationsListCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsOperationsListCall) Fields(s ...googleapi.Field) *ProjectsOperationsListCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsOperationsListCall) Do() (*ListAggregatedOperationsResponse, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/operations")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *ListAggregatedOperationsResponse
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Lists all operations in a project, across all zones.",
- // "httpMethod": "GET",
- // "id": "container.projects.operations.list",
- // "parameterOrder": [
- // "projectId"
- // ],
- // "parameters": {
- // "projectId": {
- // "description": "The Google Developers Console project ID or project number.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{projectId}/operations",
- // "response": {
- // "$ref": "ListAggregatedOperationsResponse"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "container.projects.zones.clusters.create":
-
-type ProjectsZonesClustersCreateCall struct {
- s *Service
- projectId string
- zoneId string
- createclusterrequest *CreateClusterRequest
- opt_ map[string]interface{}
-}
-
-// Create: Creates a cluster, consisting of the specified number and
-// type of Google Compute Engine instances, plus a Kubernetes master
-// instance.
-//
-// The cluster is created in the project's default network.
-//
-// A firewall is added that allows traffic into port 443 on the master,
-// which enables HTTPS. A firewall and a route is added for each node to
-// allow the containers on that node to communicate with all other
-// instances in the cluster.
-//
-// Finally, an entry is added to the project's global metadata
-// indicating which CIDR range is being used by the cluster.
-func (r *ProjectsZonesClustersService) Create(projectId string, zoneId string, createclusterrequest *CreateClusterRequest) *ProjectsZonesClustersCreateCall {
- c := &ProjectsZonesClustersCreateCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.zoneId = zoneId
- c.createclusterrequest = createclusterrequest
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsZonesClustersCreateCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersCreateCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsZonesClustersCreateCall) Do() (*Operation, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.createclusterrequest)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/clusters")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "zoneId": c.zoneId,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Operation
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances, plus a Kubernetes master instance.\n\nThe cluster is created in the project's default network.\n\nA firewall is added that allows traffic into port 443 on the master, which enables HTTPS. A firewall and a route is added for each node to allow the containers on that node to communicate with all other instances in the cluster.\n\nFinally, an entry is added to the project's global metadata indicating which CIDR range is being used by the cluster.",
- // "httpMethod": "POST",
- // "id": "container.projects.zones.clusters.create",
- // "parameterOrder": [
- // "projectId",
- // "zoneId"
- // ],
- // "parameters": {
- // "projectId": {
- // "description": "The Google Developers Console project ID or project number.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "zoneId": {
- // "description": "The name of the Google Compute Engine zone in which the cluster resides.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{projectId}/zones/{zoneId}/clusters",
- // "request": {
- // "$ref": "CreateClusterRequest"
- // },
- // "response": {
- // "$ref": "Operation"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "container.projects.zones.clusters.delete":
-
-type ProjectsZonesClustersDeleteCall struct {
- s *Service
- projectId string
- zoneId string
- clusterId string
- opt_ map[string]interface{}
-}
-
-// Delete: Deletes the cluster, including the Kubernetes master and all
-// worker nodes.
-//
-// Firewalls and routes that were configured at cluster creation are
-// also deleted.
-func (r *ProjectsZonesClustersService) Delete(projectId string, zoneId string, clusterId string) *ProjectsZonesClustersDeleteCall {
- c := &ProjectsZonesClustersDeleteCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.zoneId = zoneId
- c.clusterId = clusterId
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsZonesClustersDeleteCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersDeleteCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsZonesClustersDeleteCall) Do() (*Operation, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/clusters/{clusterId}")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("DELETE", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "zoneId": c.zoneId,
- "clusterId": c.clusterId,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Operation
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Deletes the cluster, including the Kubernetes master and all worker nodes.\n\nFirewalls and routes that were configured at cluster creation are also deleted.",
- // "httpMethod": "DELETE",
- // "id": "container.projects.zones.clusters.delete",
- // "parameterOrder": [
- // "projectId",
- // "zoneId",
- // "clusterId"
- // ],
- // "parameters": {
- // "clusterId": {
- // "description": "The name of the cluster to delete.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "projectId": {
- // "description": "The Google Developers Console project ID or project number.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "zoneId": {
- // "description": "The name of the Google Compute Engine zone in which the cluster resides.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{projectId}/zones/{zoneId}/clusters/{clusterId}",
- // "response": {
- // "$ref": "Operation"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "container.projects.zones.clusters.get":
-
-type ProjectsZonesClustersGetCall struct {
- s *Service
- projectId string
- zoneId string
- clusterId string
- opt_ map[string]interface{}
-}
-
-// Get: Gets a specific cluster.
-func (r *ProjectsZonesClustersService) Get(projectId string, zoneId string, clusterId string) *ProjectsZonesClustersGetCall {
- c := &ProjectsZonesClustersGetCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.zoneId = zoneId
- c.clusterId = clusterId
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsZonesClustersGetCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersGetCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsZonesClustersGetCall) Do() (*Cluster, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/clusters/{clusterId}")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "zoneId": c.zoneId,
- "clusterId": c.clusterId,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Cluster
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Gets a specific cluster.",
- // "httpMethod": "GET",
- // "id": "container.projects.zones.clusters.get",
- // "parameterOrder": [
- // "projectId",
- // "zoneId",
- // "clusterId"
- // ],
- // "parameters": {
- // "clusterId": {
- // "description": "The name of the cluster to retrieve.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "projectId": {
- // "description": "The Google Developers Console project ID or project number.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "zoneId": {
- // "description": "The name of the Google Compute Engine zone in which the cluster resides.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{projectId}/zones/{zoneId}/clusters/{clusterId}",
- // "response": {
- // "$ref": "Cluster"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "container.projects.zones.clusters.list":
-
-type ProjectsZonesClustersListCall struct {
- s *Service
- projectId string
- zoneId string
- opt_ map[string]interface{}
-}
-
-// List: Lists all clusters owned by a project in the specified zone.
-func (r *ProjectsZonesClustersService) List(projectId string, zoneId string) *ProjectsZonesClustersListCall {
- c := &ProjectsZonesClustersListCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.zoneId = zoneId
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsZonesClustersListCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersListCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsZonesClustersListCall) Do() (*ListClustersResponse, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/clusters")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "zoneId": c.zoneId,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *ListClustersResponse
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Lists all clusters owned by a project in the specified zone.",
- // "httpMethod": "GET",
- // "id": "container.projects.zones.clusters.list",
- // "parameterOrder": [
- // "projectId",
- // "zoneId"
- // ],
- // "parameters": {
- // "projectId": {
- // "description": "The Google Developers Console project ID or project number.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "zoneId": {
- // "description": "The name of the Google Compute Engine zone in which the cluster resides.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{projectId}/zones/{zoneId}/clusters",
- // "response": {
- // "$ref": "ListClustersResponse"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "container.projects.zones.operations.get":
-
-type ProjectsZonesOperationsGetCall struct {
- s *Service
- projectId string
- zoneId string
- operationId string
- opt_ map[string]interface{}
-}
-
-// Get: Gets the specified operation.
-func (r *ProjectsZonesOperationsService) Get(projectId string, zoneId string, operationId string) *ProjectsZonesOperationsGetCall {
- c := &ProjectsZonesOperationsGetCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.zoneId = zoneId
- c.operationId = operationId
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsZonesOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsZonesOperationsGetCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsZonesOperationsGetCall) Do() (*Operation, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/operations/{operationId}")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "zoneId": c.zoneId,
- "operationId": c.operationId,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Operation
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Gets the specified operation.",
- // "httpMethod": "GET",
- // "id": "container.projects.zones.operations.get",
- // "parameterOrder": [
- // "projectId",
- // "zoneId",
- // "operationId"
- // ],
- // "parameters": {
- // "operationId": {
- // "description": "The server-assigned name of the operation.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "projectId": {
- // "description": "The Google Developers Console project ID or project number.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "zoneId": {
- // "description": "The name of the Google Compute Engine zone in which the operation resides. This is always the same zone as the cluster with which the operation is associated.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{projectId}/zones/{zoneId}/operations/{operationId}",
- // "response": {
- // "$ref": "Operation"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
-
-// method id "container.projects.zones.operations.list":
-
-type ProjectsZonesOperationsListCall struct {
- s *Service
- projectId string
- zoneId string
- opt_ map[string]interface{}
-}
-
-// List: Lists all operations in a project in a specific zone.
-func (r *ProjectsZonesOperationsService) List(projectId string, zoneId string) *ProjectsZonesOperationsListCall {
- c := &ProjectsZonesOperationsListCall{s: r.s, opt_: make(map[string]interface{})}
- c.projectId = projectId
- c.zoneId = zoneId
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsZonesOperationsListCall) Fields(s ...googleapi.Field) *ProjectsZonesOperationsListCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsZonesOperationsListCall) Do() (*ListOperationsResponse, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/operations")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "projectId": c.projectId,
- "zoneId": c.zoneId,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *ListOperationsResponse
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Lists all operations in a project in a specific zone.",
- // "httpMethod": "GET",
- // "id": "container.projects.zones.operations.list",
- // "parameterOrder": [
- // "projectId",
- // "zoneId"
- // ],
- // "parameters": {
- // "projectId": {
- // "description": "The Google Developers Console project ID or project number.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "zoneId": {
- // "description": "The name of the Google Compute Engine zone to return operations for.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{projectId}/zones/{zoneId}/operations",
- // "response": {
- // "$ref": "ListOperationsResponse"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform"
- // ]
- // }
-
-}
diff --git a/vendor/google.golang.org/api/pubsub/v1beta2/pubsub-api.json b/vendor/google.golang.org/api/pubsub/v1beta2/pubsub-api.json
deleted file mode 100644
index e0aa7cf..0000000
--- a/vendor/google.golang.org/api/pubsub/v1beta2/pubsub-api.json
+++ /dev/null
@@ -1,679 +0,0 @@
-{
- "kind": "discovery#restDescription",
- "etag": "\"ye6orv2F-1npMW3u9suM3a7C5Bo/k747AQVNKzUoa08QT-Z1GxOMZC0\"",
- "discoveryVersion": "v1",
- "id": "pubsub:v1beta2",
- "name": "pubsub",
- "version": "v1beta2",
- "revision": "20150326",
- "title": "Google Cloud Pub/Sub API",
- "description": "Provides reliable, many-to-many, asynchronous messaging between applications.",
- "ownerDomain": "google.com",
- "ownerName": "Google",
- "icons": {
- "x16": "http://www.google.com/images/icons/product/search-16.gif",
- "x32": "http://www.google.com/images/icons/product/search-32.gif"
- },
- "documentationLink": "",
- "protocol": "rest",
- "baseUrl": "https://pubsub.googleapis.com/v1beta2/",
- "basePath": "/v1beta2/",
- "rootUrl": "https://pubsub.googleapis.com/",
- "servicePath": "v1beta2/",
- "batchPath": "batch",
- "parameters": {
- "alt": {
- "type": "string",
- "description": "Data format for the response.",
- "default": "json",
- "enum": [
- "json"
- ],
- "enumDescriptions": [
- "Responses with Content-Type of application/json"
- ],
- "location": "query"
- },
- "fields": {
- "type": "string",
- "description": "Selector specifying which fields to include in a partial response.",
- "location": "query"
- },
- "key": {
- "type": "string",
- "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
- "location": "query"
- },
- "oauth_token": {
- "type": "string",
- "description": "OAuth 2.0 token for the current user.",
- "location": "query"
- },
- "prettyPrint": {
- "type": "boolean",
- "description": "Returns response with indentations and line breaks.",
- "default": "true",
- "location": "query"
- },
- "quotaUser": {
- "type": "string",
- "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.",
- "location": "query"
- },
- "userIp": {
- "type": "string",
- "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.",
- "location": "query"
- }
- },
- "auth": {
- "oauth2": {
- "scopes": {
- "https://www.googleapis.com/auth/cloud-platform": {
- "description": "View and manage your data across Google Cloud Platform services"
- },
- "https://www.googleapis.com/auth/pubsub": {
- "description": "View and manage Pub/Sub topics and subscriptions"
- }
- }
- }
- },
- "schemas": {
- "AcknowledgeRequest": {
- "id": "AcknowledgeRequest",
- "type": "object",
- "properties": {
- "ackIds": {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- }
- },
- "Empty": {
- "id": "Empty",
- "type": "object"
- },
- "ListSubscriptionsResponse": {
- "id": "ListSubscriptionsResponse",
- "type": "object",
- "properties": {
- "nextPageToken": {
- "type": "string"
- },
- "subscriptions": {
- "type": "array",
- "items": {
- "$ref": "Subscription"
- }
- }
- }
- },
- "ListTopicSubscriptionsResponse": {
- "id": "ListTopicSubscriptionsResponse",
- "type": "object",
- "properties": {
- "nextPageToken": {
- "type": "string"
- },
- "subscriptions": {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- }
- },
- "ListTopicsResponse": {
- "id": "ListTopicsResponse",
- "type": "object",
- "properties": {
- "nextPageToken": {
- "type": "string"
- },
- "topics": {
- "type": "array",
- "items": {
- "$ref": "Topic"
- }
- }
- }
- },
- "ModifyAckDeadlineRequest": {
- "id": "ModifyAckDeadlineRequest",
- "type": "object",
- "properties": {
- "ackDeadlineSeconds": {
- "type": "integer",
- "format": "int32"
- },
- "ackId": {
- "type": "string"
- }
- }
- },
- "ModifyPushConfigRequest": {
- "id": "ModifyPushConfigRequest",
- "type": "object",
- "properties": {
- "pushConfig": {
- "$ref": "PushConfig"
- }
- }
- },
- "PublishRequest": {
- "id": "PublishRequest",
- "type": "object",
- "properties": {
- "messages": {
- "type": "array",
- "items": {
- "$ref": "PubsubMessage"
- }
- }
- }
- },
- "PublishResponse": {
- "id": "PublishResponse",
- "type": "object",
- "properties": {
- "messageIds": {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- }
- },
- "PubsubMessage": {
- "id": "PubsubMessage",
- "type": "object",
- "properties": {
- "attributes": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "data": {
- "type": "string",
- "format": "byte"
- },
- "messageId": {
- "type": "string"
- }
- }
- },
- "PullRequest": {
- "id": "PullRequest",
- "type": "object",
- "properties": {
- "maxMessages": {
- "type": "integer",
- "format": "int32"
- },
- "returnImmediately": {
- "type": "boolean"
- }
- }
- },
- "PullResponse": {
- "id": "PullResponse",
- "type": "object",
- "properties": {
- "receivedMessages": {
- "type": "array",
- "items": {
- "$ref": "ReceivedMessage"
- }
- }
- }
- },
- "PushConfig": {
- "id": "PushConfig",
- "type": "object",
- "properties": {
- "attributes": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "pushEndpoint": {
- "type": "string"
- }
- }
- },
- "ReceivedMessage": {
- "id": "ReceivedMessage",
- "type": "object",
- "properties": {
- "ackId": {
- "type": "string"
- },
- "message": {
- "$ref": "PubsubMessage"
- }
- }
- },
- "Subscription": {
- "id": "Subscription",
- "type": "object",
- "properties": {
- "ackDeadlineSeconds": {
- "type": "integer",
- "format": "int32"
- },
- "name": {
- "type": "string"
- },
- "pushConfig": {
- "$ref": "PushConfig"
- },
- "topic": {
- "type": "string"
- }
- }
- },
- "Topic": {
- "id": "Topic",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- }
- }
- }
- },
- "resources": {
- "projects": {
- "resources": {
- "subscriptions": {
- "methods": {
- "acknowledge": {
- "id": "pubsub.projects.subscriptions.acknowledge",
- "path": "{+subscription}:acknowledge",
- "httpMethod": "POST",
- "description": "Acknowledges the messages associated with the ack tokens in the AcknowledgeRequest. The Pub/Sub system can remove the relevant messages from the subscription. Acknowledging a message whose ack deadline has expired may succeed, but such a message may be redelivered later. Acknowledging a message more than once will not result in an error.",
- "parameters": {
- "subscription": {
- "type": "string",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "subscription"
- ],
- "request": {
- "$ref": "AcknowledgeRequest"
- },
- "response": {
- "$ref": "Empty"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/pubsub"
- ]
- },
- "create": {
- "id": "pubsub.projects.subscriptions.create",
- "path": "{+name}",
- "httpMethod": "PUT",
- "description": "Creates a subscription to a given topic for a given subscriber. If the subscription already exists, returns ALREADY_EXISTS. If the corresponding topic doesn't exist, returns NOT_FOUND. If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic.",
- "parameters": {
- "name": {
- "type": "string",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "name"
- ],
- "request": {
- "$ref": "Subscription"
- },
- "response": {
- "$ref": "Subscription"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/pubsub"
- ]
- },
- "delete": {
- "id": "pubsub.projects.subscriptions.delete",
- "path": "{+subscription}",
- "httpMethod": "DELETE",
- "description": "Deletes an existing subscription. All pending messages in the subscription are immediately dropped. Calls to Pull after deletion will return NOT_FOUND. After a subscription is deleted, a new one may be created with the same name, but the new one has no association with the old subscription, or its topic unless the same topic is specified.",
- "parameters": {
- "subscription": {
- "type": "string",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "subscription"
- ],
- "response": {
- "$ref": "Empty"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/pubsub"
- ]
- },
- "get": {
- "id": "pubsub.projects.subscriptions.get",
- "path": "{+subscription}",
- "httpMethod": "GET",
- "description": "Gets the configuration details of a subscription.",
- "parameters": {
- "subscription": {
- "type": "string",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "subscription"
- ],
- "response": {
- "$ref": "Subscription"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/pubsub"
- ]
- },
- "list": {
- "id": "pubsub.projects.subscriptions.list",
- "path": "{+project}/subscriptions",
- "httpMethod": "GET",
- "description": "Lists matching subscriptions.",
- "parameters": {
- "pageSize": {
- "type": "integer",
- "format": "int32",
- "location": "query"
- },
- "pageToken": {
- "type": "string",
- "location": "query"
- },
- "project": {
- "type": "string",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "project"
- ],
- "response": {
- "$ref": "ListSubscriptionsResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/pubsub"
- ]
- },
- "modifyAckDeadline": {
- "id": "pubsub.projects.subscriptions.modifyAckDeadline",
- "path": "{+subscription}:modifyAckDeadline",
- "httpMethod": "POST",
- "description": "Modifies the ack deadline for a specific message. This method is useful to indicate that more time is needed to process a message by the subscriber, or to make the message available for redelivery if the processing was interrupted.",
- "parameters": {
- "subscription": {
- "type": "string",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "subscription"
- ],
- "request": {
- "$ref": "ModifyAckDeadlineRequest"
- },
- "response": {
- "$ref": "Empty"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/pubsub"
- ]
- },
- "modifyPushConfig": {
- "id": "pubsub.projects.subscriptions.modifyPushConfig",
- "path": "{+subscription}:modifyPushConfig",
- "httpMethod": "POST",
- "description": "Modifies the PushConfig for a specified subscription. This may be used to change a push subscription to a pull one (signified by an empty PushConfig) or vice versa, or change the endpoint URL and other attributes of a push subscription. Messages will accumulate for delivery continuously through the call regardless of changes to the PushConfig.",
- "parameters": {
- "subscription": {
- "type": "string",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "subscription"
- ],
- "request": {
- "$ref": "ModifyPushConfigRequest"
- },
- "response": {
- "$ref": "Empty"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/pubsub"
- ]
- },
- "pull": {
- "id": "pubsub.projects.subscriptions.pull",
- "path": "{+subscription}:pull",
- "httpMethod": "POST",
- "description": "Pulls messages from the server. Returns an empty list if there are no messages available in the backlog. The server may return UNAVAILABLE if there are too many concurrent pull requests pending for the given subscription.",
- "parameters": {
- "subscription": {
- "type": "string",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "subscription"
- ],
- "request": {
- "$ref": "PullRequest"
- },
- "response": {
- "$ref": "PullResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/pubsub"
- ]
- }
- }
- },
- "topics": {
- "methods": {
- "create": {
- "id": "pubsub.projects.topics.create",
- "path": "{+name}",
- "httpMethod": "PUT",
- "description": "Creates the given topic with the given name.",
- "parameters": {
- "name": {
- "type": "string",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "name"
- ],
- "request": {
- "$ref": "Topic"
- },
- "response": {
- "$ref": "Topic"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/pubsub"
- ]
- },
- "delete": {
- "id": "pubsub.projects.topics.delete",
- "path": "{+topic}",
- "httpMethod": "DELETE",
- "description": "Deletes the topic with the given name. Returns NOT_FOUND if the topic does not exist. After a topic is deleted, a new topic may be created with the same name; this is an entirely new topic with none of the old configuration or subscriptions. Existing subscriptions to this topic are not deleted.",
- "parameters": {
- "topic": {
- "type": "string",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "topic"
- ],
- "response": {
- "$ref": "Empty"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/pubsub"
- ]
- },
- "get": {
- "id": "pubsub.projects.topics.get",
- "path": "{+topic}",
- "httpMethod": "GET",
- "description": "Gets the configuration of a topic.",
- "parameters": {
- "topic": {
- "type": "string",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "topic"
- ],
- "response": {
- "$ref": "Topic"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/pubsub"
- ]
- },
- "list": {
- "id": "pubsub.projects.topics.list",
- "path": "{+project}/topics",
- "httpMethod": "GET",
- "description": "Lists matching topics.",
- "parameters": {
- "pageSize": {
- "type": "integer",
- "format": "int32",
- "location": "query"
- },
- "pageToken": {
- "type": "string",
- "location": "query"
- },
- "project": {
- "type": "string",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "project"
- ],
- "response": {
- "$ref": "ListTopicsResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/pubsub"
- ]
- },
- "publish": {
- "id": "pubsub.projects.topics.publish",
- "path": "{+topic}:publish",
- "httpMethod": "POST",
- "description": "Adds one or more messages to the topic. Returns NOT_FOUND if the topic does not exist.",
- "parameters": {
- "topic": {
- "type": "string",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "topic"
- ],
- "request": {
- "$ref": "PublishRequest"
- },
- "response": {
- "$ref": "PublishResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/pubsub"
- ]
- }
- },
- "resources": {
- "subscriptions": {
- "methods": {
- "list": {
- "id": "pubsub.projects.topics.subscriptions.list",
- "path": "{+topic}/subscriptions",
- "httpMethod": "GET",
- "description": "Lists the name of the subscriptions for this topic.",
- "parameters": {
- "pageSize": {
- "type": "integer",
- "format": "int32",
- "location": "query"
- },
- "pageToken": {
- "type": "string",
- "location": "query"
- },
- "topic": {
- "type": "string",
- "required": true,
- "location": "path"
- }
- },
- "parameterOrder": [
- "topic"
- ],
- "response": {
- "$ref": "ListTopicSubscriptionsResponse"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/pubsub"
- ]
- }
- }
- }
- }
- }
- }
- }
- }
-}
diff --git a/vendor/google.golang.org/api/pubsub/v1beta2/pubsub-gen.go b/vendor/google.golang.org/api/pubsub/v1beta2/pubsub-gen.go
deleted file mode 100644
index 632df9f..0000000
--- a/vendor/google.golang.org/api/pubsub/v1beta2/pubsub-gen.go
+++ /dev/null
@@ -1,1458 +0,0 @@
-// Package pubsub provides access to the Google Cloud Pub/Sub API.
-//
-// Usage example:
-//
-// import "google.golang.org/api/pubsub/v1beta2"
-// ...
-// pubsubService, err := pubsub.New(oauthHttpClient)
-package pubsub
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "golang.org/x/net/context"
- "google.golang.org/api/googleapi"
- "io"
- "net/http"
- "net/url"
- "strconv"
- "strings"
-)
-
-// Always reference these packages, just in case the auto-generated code
-// below doesn't.
-var _ = bytes.NewBuffer
-var _ = strconv.Itoa
-var _ = fmt.Sprintf
-var _ = json.NewDecoder
-var _ = io.Copy
-var _ = url.Parse
-var _ = googleapi.Version
-var _ = errors.New
-var _ = strings.Replace
-var _ = context.Background
-
-const apiId = "pubsub:v1beta2"
-const apiName = "pubsub"
-const apiVersion = "v1beta2"
-const basePath = "https://pubsub.googleapis.com/v1beta2/"
-
-// OAuth2 scopes used by this API.
-const (
- // View and manage your data across Google Cloud Platform services
- CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
-
- // View and manage Pub/Sub topics and subscriptions
- PubsubScope = "https://www.googleapis.com/auth/pubsub"
-)
-
-func New(client *http.Client) (*Service, error) {
- if client == nil {
- return nil, errors.New("client is nil")
- }
- s := &Service{client: client, BasePath: basePath}
- s.Projects = NewProjectsService(s)
- return s, nil
-}
-
-type Service struct {
- client *http.Client
- BasePath string // API endpoint base URL
- UserAgent string // optional additional User-Agent fragment
-
- Projects *ProjectsService
-}
-
-func (s *Service) userAgent() string {
- if s.UserAgent == "" {
- return googleapi.UserAgent
- }
- return googleapi.UserAgent + " " + s.UserAgent
-}
-
-func NewProjectsService(s *Service) *ProjectsService {
- rs := &ProjectsService{s: s}
- rs.Subscriptions = NewProjectsSubscriptionsService(s)
- rs.Topics = NewProjectsTopicsService(s)
- return rs
-}
-
-type ProjectsService struct {
- s *Service
-
- Subscriptions *ProjectsSubscriptionsService
-
- Topics *ProjectsTopicsService
-}
-
-func NewProjectsSubscriptionsService(s *Service) *ProjectsSubscriptionsService {
- rs := &ProjectsSubscriptionsService{s: s}
- return rs
-}
-
-type ProjectsSubscriptionsService struct {
- s *Service
-}
-
-func NewProjectsTopicsService(s *Service) *ProjectsTopicsService {
- rs := &ProjectsTopicsService{s: s}
- rs.Subscriptions = NewProjectsTopicsSubscriptionsService(s)
- return rs
-}
-
-type ProjectsTopicsService struct {
- s *Service
-
- Subscriptions *ProjectsTopicsSubscriptionsService
-}
-
-func NewProjectsTopicsSubscriptionsService(s *Service) *ProjectsTopicsSubscriptionsService {
- rs := &ProjectsTopicsSubscriptionsService{s: s}
- return rs
-}
-
-type ProjectsTopicsSubscriptionsService struct {
- s *Service
-}
-
-type AcknowledgeRequest struct {
- AckIds []string `json:"ackIds,omitempty"`
-}
-
-type Empty struct {
-}
-
-type ListSubscriptionsResponse struct {
- NextPageToken string `json:"nextPageToken,omitempty"`
-
- Subscriptions []*Subscription `json:"subscriptions,omitempty"`
-}
-
-type ListTopicSubscriptionsResponse struct {
- NextPageToken string `json:"nextPageToken,omitempty"`
-
- Subscriptions []string `json:"subscriptions,omitempty"`
-}
-
-type ListTopicsResponse struct {
- NextPageToken string `json:"nextPageToken,omitempty"`
-
- Topics []*Topic `json:"topics,omitempty"`
-}
-
-type ModifyAckDeadlineRequest struct {
- AckDeadlineSeconds int64 `json:"ackDeadlineSeconds,omitempty"`
-
- AckId string `json:"ackId,omitempty"`
-}
-
-type ModifyPushConfigRequest struct {
- PushConfig *PushConfig `json:"pushConfig,omitempty"`
-}
-
-type PublishRequest struct {
- Messages []*PubsubMessage `json:"messages,omitempty"`
-}
-
-type PublishResponse struct {
- MessageIds []string `json:"messageIds,omitempty"`
-}
-
-type PubsubMessage struct {
- Attributes map[string]string `json:"attributes,omitempty"`
-
- Data string `json:"data,omitempty"`
-
- MessageId string `json:"messageId,omitempty"`
-}
-
-type PullRequest struct {
- MaxMessages int64 `json:"maxMessages,omitempty"`
-
- ReturnImmediately bool `json:"returnImmediately,omitempty"`
-}
-
-type PullResponse struct {
- ReceivedMessages []*ReceivedMessage `json:"receivedMessages,omitempty"`
-}
-
-type PushConfig struct {
- Attributes map[string]string `json:"attributes,omitempty"`
-
- PushEndpoint string `json:"pushEndpoint,omitempty"`
-}
-
-type ReceivedMessage struct {
- AckId string `json:"ackId,omitempty"`
-
- Message *PubsubMessage `json:"message,omitempty"`
-}
-
-type Subscription struct {
- AckDeadlineSeconds int64 `json:"ackDeadlineSeconds,omitempty"`
-
- Name string `json:"name,omitempty"`
-
- PushConfig *PushConfig `json:"pushConfig,omitempty"`
-
- Topic string `json:"topic,omitempty"`
-}
-
-type Topic struct {
- Name string `json:"name,omitempty"`
-}
-
-// method id "pubsub.projects.subscriptions.acknowledge":
-
-type ProjectsSubscriptionsAcknowledgeCall struct {
- s *Service
- subscription string
- acknowledgerequest *AcknowledgeRequest
- opt_ map[string]interface{}
-}
-
-// Acknowledge: Acknowledges the messages associated with the ack tokens
-// in the AcknowledgeRequest. The Pub/Sub system can remove the relevant
-// messages from the subscription. Acknowledging a message whose ack
-// deadline has expired may succeed, but such a message may be
-// redelivered later. Acknowledging a message more than once will not
-// result in an error.
-func (r *ProjectsSubscriptionsService) Acknowledge(subscription string, acknowledgerequest *AcknowledgeRequest) *ProjectsSubscriptionsAcknowledgeCall {
- c := &ProjectsSubscriptionsAcknowledgeCall{s: r.s, opt_: make(map[string]interface{})}
- c.subscription = subscription
- c.acknowledgerequest = acknowledgerequest
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsSubscriptionsAcknowledgeCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsAcknowledgeCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsSubscriptionsAcknowledgeCall) Do() (*Empty, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.acknowledgerequest)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{+subscription}:acknowledge")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "subscription": c.subscription,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Empty
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Acknowledges the messages associated with the ack tokens in the AcknowledgeRequest. The Pub/Sub system can remove the relevant messages from the subscription. Acknowledging a message whose ack deadline has expired may succeed, but such a message may be redelivered later. Acknowledging a message more than once will not result in an error.",
- // "httpMethod": "POST",
- // "id": "pubsub.projects.subscriptions.acknowledge",
- // "parameterOrder": [
- // "subscription"
- // ],
- // "parameters": {
- // "subscription": {
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{+subscription}:acknowledge",
- // "request": {
- // "$ref": "AcknowledgeRequest"
- // },
- // "response": {
- // "$ref": "Empty"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/pubsub"
- // ]
- // }
-
-}
-
-// method id "pubsub.projects.subscriptions.create":
-
-type ProjectsSubscriptionsCreateCall struct {
- s *Service
- name string
- subscription *Subscription
- opt_ map[string]interface{}
-}
-
-// Create: Creates a subscription to a given topic for a given
-// subscriber. If the subscription already exists, returns
-// ALREADY_EXISTS. If the corresponding topic doesn't exist, returns
-// NOT_FOUND. If the name is not provided in the request, the server
-// will assign a random name for this subscription on the same project
-// as the topic.
-func (r *ProjectsSubscriptionsService) Create(name string, subscription *Subscription) *ProjectsSubscriptionsCreateCall {
- c := &ProjectsSubscriptionsCreateCall{s: r.s, opt_: make(map[string]interface{})}
- c.name = name
- c.subscription = subscription
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsSubscriptionsCreateCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsCreateCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsSubscriptionsCreateCall) Do() (*Subscription, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.subscription)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{+name}")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("PUT", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "name": c.name,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Subscription
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Creates a subscription to a given topic for a given subscriber. If the subscription already exists, returns ALREADY_EXISTS. If the corresponding topic doesn't exist, returns NOT_FOUND. If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic.",
- // "httpMethod": "PUT",
- // "id": "pubsub.projects.subscriptions.create",
- // "parameterOrder": [
- // "name"
- // ],
- // "parameters": {
- // "name": {
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{+name}",
- // "request": {
- // "$ref": "Subscription"
- // },
- // "response": {
- // "$ref": "Subscription"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/pubsub"
- // ]
- // }
-
-}
-
-// method id "pubsub.projects.subscriptions.delete":
-
-type ProjectsSubscriptionsDeleteCall struct {
- s *Service
- subscription string
- opt_ map[string]interface{}
-}
-
-// Delete: Deletes an existing subscription. All pending messages in the
-// subscription are immediately dropped. Calls to Pull after deletion
-// will return NOT_FOUND. After a subscription is deleted, a new one may
-// be created with the same name, but the new one has no association
-// with the old subscription, or its topic unless the same topic is
-// specified.
-func (r *ProjectsSubscriptionsService) Delete(subscription string) *ProjectsSubscriptionsDeleteCall {
- c := &ProjectsSubscriptionsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
- c.subscription = subscription
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsSubscriptionsDeleteCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsDeleteCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsSubscriptionsDeleteCall) Do() (*Empty, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{+subscription}")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("DELETE", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "subscription": c.subscription,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Empty
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Deletes an existing subscription. All pending messages in the subscription are immediately dropped. Calls to Pull after deletion will return NOT_FOUND. After a subscription is deleted, a new one may be created with the same name, but the new one has no association with the old subscription, or its topic unless the same topic is specified.",
- // "httpMethod": "DELETE",
- // "id": "pubsub.projects.subscriptions.delete",
- // "parameterOrder": [
- // "subscription"
- // ],
- // "parameters": {
- // "subscription": {
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{+subscription}",
- // "response": {
- // "$ref": "Empty"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/pubsub"
- // ]
- // }
-
-}
-
-// method id "pubsub.projects.subscriptions.get":
-
-type ProjectsSubscriptionsGetCall struct {
- s *Service
- subscription string
- opt_ map[string]interface{}
-}
-
-// Get: Gets the configuration details of a subscription.
-func (r *ProjectsSubscriptionsService) Get(subscription string) *ProjectsSubscriptionsGetCall {
- c := &ProjectsSubscriptionsGetCall{s: r.s, opt_: make(map[string]interface{})}
- c.subscription = subscription
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsSubscriptionsGetCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsGetCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsSubscriptionsGetCall) Do() (*Subscription, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{+subscription}")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "subscription": c.subscription,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Subscription
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Gets the configuration details of a subscription.",
- // "httpMethod": "GET",
- // "id": "pubsub.projects.subscriptions.get",
- // "parameterOrder": [
- // "subscription"
- // ],
- // "parameters": {
- // "subscription": {
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{+subscription}",
- // "response": {
- // "$ref": "Subscription"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/pubsub"
- // ]
- // }
-
-}
-
-// method id "pubsub.projects.subscriptions.list":
-
-type ProjectsSubscriptionsListCall struct {
- s *Service
- project string
- opt_ map[string]interface{}
-}
-
-// List: Lists matching subscriptions.
-func (r *ProjectsSubscriptionsService) List(project string) *ProjectsSubscriptionsListCall {
- c := &ProjectsSubscriptionsListCall{s: r.s, opt_: make(map[string]interface{})}
- c.project = project
- return c
-}
-
-// PageSize sets the optional parameter "pageSize":
-func (c *ProjectsSubscriptionsListCall) PageSize(pageSize int64) *ProjectsSubscriptionsListCall {
- c.opt_["pageSize"] = pageSize
- return c
-}
-
-// PageToken sets the optional parameter "pageToken":
-func (c *ProjectsSubscriptionsListCall) PageToken(pageToken string) *ProjectsSubscriptionsListCall {
- c.opt_["pageToken"] = pageToken
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsSubscriptionsListCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsListCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsSubscriptionsListCall) Do() (*ListSubscriptionsResponse, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["pageSize"]; ok {
- params.Set("pageSize", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["pageToken"]; ok {
- params.Set("pageToken", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{+project}/subscriptions")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *ListSubscriptionsResponse
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Lists matching subscriptions.",
- // "httpMethod": "GET",
- // "id": "pubsub.projects.subscriptions.list",
- // "parameterOrder": [
- // "project"
- // ],
- // "parameters": {
- // "pageSize": {
- // "format": "int32",
- // "location": "query",
- // "type": "integer"
- // },
- // "pageToken": {
- // "location": "query",
- // "type": "string"
- // },
- // "project": {
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{+project}/subscriptions",
- // "response": {
- // "$ref": "ListSubscriptionsResponse"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/pubsub"
- // ]
- // }
-
-}
-
-// method id "pubsub.projects.subscriptions.modifyAckDeadline":
-
-type ProjectsSubscriptionsModifyAckDeadlineCall struct {
- s *Service
- subscription string
- modifyackdeadlinerequest *ModifyAckDeadlineRequest
- opt_ map[string]interface{}
-}
-
-// ModifyAckDeadline: Modifies the ack deadline for a specific message.
-// This method is useful to indicate that more time is needed to process
-// a message by the subscriber, or to make the message available for
-// redelivery if the processing was interrupted.
-func (r *ProjectsSubscriptionsService) ModifyAckDeadline(subscription string, modifyackdeadlinerequest *ModifyAckDeadlineRequest) *ProjectsSubscriptionsModifyAckDeadlineCall {
- c := &ProjectsSubscriptionsModifyAckDeadlineCall{s: r.s, opt_: make(map[string]interface{})}
- c.subscription = subscription
- c.modifyackdeadlinerequest = modifyackdeadlinerequest
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsSubscriptionsModifyAckDeadlineCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsModifyAckDeadlineCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsSubscriptionsModifyAckDeadlineCall) Do() (*Empty, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifyackdeadlinerequest)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{+subscription}:modifyAckDeadline")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "subscription": c.subscription,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Empty
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Modifies the ack deadline for a specific message. This method is useful to indicate that more time is needed to process a message by the subscriber, or to make the message available for redelivery if the processing was interrupted.",
- // "httpMethod": "POST",
- // "id": "pubsub.projects.subscriptions.modifyAckDeadline",
- // "parameterOrder": [
- // "subscription"
- // ],
- // "parameters": {
- // "subscription": {
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{+subscription}:modifyAckDeadline",
- // "request": {
- // "$ref": "ModifyAckDeadlineRequest"
- // },
- // "response": {
- // "$ref": "Empty"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/pubsub"
- // ]
- // }
-
-}
-
-// method id "pubsub.projects.subscriptions.modifyPushConfig":
-
-type ProjectsSubscriptionsModifyPushConfigCall struct {
- s *Service
- subscription string
- modifypushconfigrequest *ModifyPushConfigRequest
- opt_ map[string]interface{}
-}
-
-// ModifyPushConfig: Modifies the PushConfig for a specified
-// subscription. This may be used to change a push subscription to a
-// pull one (signified by an empty PushConfig) or vice versa, or change
-// the endpoint URL and other attributes of a push subscription.
-// Messages will accumulate for delivery continuously through the call
-// regardless of changes to the PushConfig.
-func (r *ProjectsSubscriptionsService) ModifyPushConfig(subscription string, modifypushconfigrequest *ModifyPushConfigRequest) *ProjectsSubscriptionsModifyPushConfigCall {
- c := &ProjectsSubscriptionsModifyPushConfigCall{s: r.s, opt_: make(map[string]interface{})}
- c.subscription = subscription
- c.modifypushconfigrequest = modifypushconfigrequest
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsSubscriptionsModifyPushConfigCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsModifyPushConfigCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsSubscriptionsModifyPushConfigCall) Do() (*Empty, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.modifypushconfigrequest)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{+subscription}:modifyPushConfig")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "subscription": c.subscription,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Empty
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Modifies the PushConfig for a specified subscription. This may be used to change a push subscription to a pull one (signified by an empty PushConfig) or vice versa, or change the endpoint URL and other attributes of a push subscription. Messages will accumulate for delivery continuously through the call regardless of changes to the PushConfig.",
- // "httpMethod": "POST",
- // "id": "pubsub.projects.subscriptions.modifyPushConfig",
- // "parameterOrder": [
- // "subscription"
- // ],
- // "parameters": {
- // "subscription": {
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{+subscription}:modifyPushConfig",
- // "request": {
- // "$ref": "ModifyPushConfigRequest"
- // },
- // "response": {
- // "$ref": "Empty"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/pubsub"
- // ]
- // }
-
-}
-
-// method id "pubsub.projects.subscriptions.pull":
-
-type ProjectsSubscriptionsPullCall struct {
- s *Service
- subscription string
- pullrequest *PullRequest
- opt_ map[string]interface{}
-}
-
-// Pull: Pulls messages from the server. Returns an empty list if there
-// are no messages available in the backlog. The server may return
-// UNAVAILABLE if there are too many concurrent pull requests pending
-// for the given subscription.
-func (r *ProjectsSubscriptionsService) Pull(subscription string, pullrequest *PullRequest) *ProjectsSubscriptionsPullCall {
- c := &ProjectsSubscriptionsPullCall{s: r.s, opt_: make(map[string]interface{})}
- c.subscription = subscription
- c.pullrequest = pullrequest
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsSubscriptionsPullCall) Fields(s ...googleapi.Field) *ProjectsSubscriptionsPullCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsSubscriptionsPullCall) Do() (*PullResponse, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.pullrequest)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{+subscription}:pull")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "subscription": c.subscription,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *PullResponse
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Pulls messages from the server. Returns an empty list if there are no messages available in the backlog. The server may return UNAVAILABLE if there are too many concurrent pull requests pending for the given subscription.",
- // "httpMethod": "POST",
- // "id": "pubsub.projects.subscriptions.pull",
- // "parameterOrder": [
- // "subscription"
- // ],
- // "parameters": {
- // "subscription": {
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{+subscription}:pull",
- // "request": {
- // "$ref": "PullRequest"
- // },
- // "response": {
- // "$ref": "PullResponse"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/pubsub"
- // ]
- // }
-
-}
-
-// method id "pubsub.projects.topics.create":
-
-type ProjectsTopicsCreateCall struct {
- s *Service
- name string
- topic *Topic
- opt_ map[string]interface{}
-}
-
-// Create: Creates the given topic with the given name.
-func (r *ProjectsTopicsService) Create(name string, topic *Topic) *ProjectsTopicsCreateCall {
- c := &ProjectsTopicsCreateCall{s: r.s, opt_: make(map[string]interface{})}
- c.name = name
- c.topic = topic
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsTopicsCreateCall) Fields(s ...googleapi.Field) *ProjectsTopicsCreateCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsTopicsCreateCall) Do() (*Topic, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.topic)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{+name}")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("PUT", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "name": c.name,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Topic
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Creates the given topic with the given name.",
- // "httpMethod": "PUT",
- // "id": "pubsub.projects.topics.create",
- // "parameterOrder": [
- // "name"
- // ],
- // "parameters": {
- // "name": {
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{+name}",
- // "request": {
- // "$ref": "Topic"
- // },
- // "response": {
- // "$ref": "Topic"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/pubsub"
- // ]
- // }
-
-}
-
-// method id "pubsub.projects.topics.delete":
-
-type ProjectsTopicsDeleteCall struct {
- s *Service
- topic string
- opt_ map[string]interface{}
-}
-
-// Delete: Deletes the topic with the given name. Returns NOT_FOUND if
-// the topic does not exist. After a topic is deleted, a new topic may
-// be created with the same name; this is an entirely new topic with
-// none of the old configuration or subscriptions. Existing
-// subscriptions to this topic are not deleted.
-func (r *ProjectsTopicsService) Delete(topic string) *ProjectsTopicsDeleteCall {
- c := &ProjectsTopicsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
- c.topic = topic
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsTopicsDeleteCall) Fields(s ...googleapi.Field) *ProjectsTopicsDeleteCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsTopicsDeleteCall) Do() (*Empty, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{+topic}")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("DELETE", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "topic": c.topic,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Empty
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Deletes the topic with the given name. Returns NOT_FOUND if the topic does not exist. After a topic is deleted, a new topic may be created with the same name; this is an entirely new topic with none of the old configuration or subscriptions. Existing subscriptions to this topic are not deleted.",
- // "httpMethod": "DELETE",
- // "id": "pubsub.projects.topics.delete",
- // "parameterOrder": [
- // "topic"
- // ],
- // "parameters": {
- // "topic": {
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{+topic}",
- // "response": {
- // "$ref": "Empty"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/pubsub"
- // ]
- // }
-
-}
-
-// method id "pubsub.projects.topics.get":
-
-type ProjectsTopicsGetCall struct {
- s *Service
- topic string
- opt_ map[string]interface{}
-}
-
-// Get: Gets the configuration of a topic.
-func (r *ProjectsTopicsService) Get(topic string) *ProjectsTopicsGetCall {
- c := &ProjectsTopicsGetCall{s: r.s, opt_: make(map[string]interface{})}
- c.topic = topic
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsTopicsGetCall) Fields(s ...googleapi.Field) *ProjectsTopicsGetCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsTopicsGetCall) Do() (*Topic, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{+topic}")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "topic": c.topic,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *Topic
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Gets the configuration of a topic.",
- // "httpMethod": "GET",
- // "id": "pubsub.projects.topics.get",
- // "parameterOrder": [
- // "topic"
- // ],
- // "parameters": {
- // "topic": {
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{+topic}",
- // "response": {
- // "$ref": "Topic"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/pubsub"
- // ]
- // }
-
-}
-
-// method id "pubsub.projects.topics.list":
-
-type ProjectsTopicsListCall struct {
- s *Service
- project string
- opt_ map[string]interface{}
-}
-
-// List: Lists matching topics.
-func (r *ProjectsTopicsService) List(project string) *ProjectsTopicsListCall {
- c := &ProjectsTopicsListCall{s: r.s, opt_: make(map[string]interface{})}
- c.project = project
- return c
-}
-
-// PageSize sets the optional parameter "pageSize":
-func (c *ProjectsTopicsListCall) PageSize(pageSize int64) *ProjectsTopicsListCall {
- c.opt_["pageSize"] = pageSize
- return c
-}
-
-// PageToken sets the optional parameter "pageToken":
-func (c *ProjectsTopicsListCall) PageToken(pageToken string) *ProjectsTopicsListCall {
- c.opt_["pageToken"] = pageToken
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsTopicsListCall) Fields(s ...googleapi.Field) *ProjectsTopicsListCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsTopicsListCall) Do() (*ListTopicsResponse, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["pageSize"]; ok {
- params.Set("pageSize", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["pageToken"]; ok {
- params.Set("pageToken", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{+project}/topics")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *ListTopicsResponse
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Lists matching topics.",
- // "httpMethod": "GET",
- // "id": "pubsub.projects.topics.list",
- // "parameterOrder": [
- // "project"
- // ],
- // "parameters": {
- // "pageSize": {
- // "format": "int32",
- // "location": "query",
- // "type": "integer"
- // },
- // "pageToken": {
- // "location": "query",
- // "type": "string"
- // },
- // "project": {
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{+project}/topics",
- // "response": {
- // "$ref": "ListTopicsResponse"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/pubsub"
- // ]
- // }
-
-}
-
-// method id "pubsub.projects.topics.publish":
-
-type ProjectsTopicsPublishCall struct {
- s *Service
- topic string
- publishrequest *PublishRequest
- opt_ map[string]interface{}
-}
-
-// Publish: Adds one or more messages to the topic. Returns NOT_FOUND if
-// the topic does not exist.
-func (r *ProjectsTopicsService) Publish(topic string, publishrequest *PublishRequest) *ProjectsTopicsPublishCall {
- c := &ProjectsTopicsPublishCall{s: r.s, opt_: make(map[string]interface{})}
- c.topic = topic
- c.publishrequest = publishrequest
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsTopicsPublishCall) Fields(s ...googleapi.Field) *ProjectsTopicsPublishCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsTopicsPublishCall) Do() (*PublishResponse, error) {
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.publishrequest)
- if err != nil {
- return nil, err
- }
- ctype := "application/json"
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{+topic}:publish")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("POST", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "topic": c.topic,
- })
- req.Header.Set("Content-Type", ctype)
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *PublishResponse
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Adds one or more messages to the topic. Returns NOT_FOUND if the topic does not exist.",
- // "httpMethod": "POST",
- // "id": "pubsub.projects.topics.publish",
- // "parameterOrder": [
- // "topic"
- // ],
- // "parameters": {
- // "topic": {
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{+topic}:publish",
- // "request": {
- // "$ref": "PublishRequest"
- // },
- // "response": {
- // "$ref": "PublishResponse"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/pubsub"
- // ]
- // }
-
-}
-
-// method id "pubsub.projects.topics.subscriptions.list":
-
-type ProjectsTopicsSubscriptionsListCall struct {
- s *Service
- topic string
- opt_ map[string]interface{}
-}
-
-// List: Lists the name of the subscriptions for this topic.
-func (r *ProjectsTopicsSubscriptionsService) List(topic string) *ProjectsTopicsSubscriptionsListCall {
- c := &ProjectsTopicsSubscriptionsListCall{s: r.s, opt_: make(map[string]interface{})}
- c.topic = topic
- return c
-}
-
-// PageSize sets the optional parameter "pageSize":
-func (c *ProjectsTopicsSubscriptionsListCall) PageSize(pageSize int64) *ProjectsTopicsSubscriptionsListCall {
- c.opt_["pageSize"] = pageSize
- return c
-}
-
-// PageToken sets the optional parameter "pageToken":
-func (c *ProjectsTopicsSubscriptionsListCall) PageToken(pageToken string) *ProjectsTopicsSubscriptionsListCall {
- c.opt_["pageToken"] = pageToken
- return c
-}
-
-// Fields allows partial responses to be retrieved.
-// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ProjectsTopicsSubscriptionsListCall) Fields(s ...googleapi.Field) *ProjectsTopicsSubscriptionsListCall {
- c.opt_["fields"] = googleapi.CombineFields(s)
- return c
-}
-
-func (c *ProjectsTopicsSubscriptionsListCall) Do() (*ListTopicSubscriptionsResponse, error) {
- var body io.Reader = nil
- params := make(url.Values)
- params.Set("alt", "json")
- if v, ok := c.opt_["pageSize"]; ok {
- params.Set("pageSize", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["pageToken"]; ok {
- params.Set("pageToken", fmt.Sprintf("%v", v))
- }
- if v, ok := c.opt_["fields"]; ok {
- params.Set("fields", fmt.Sprintf("%v", v))
- }
- urls := googleapi.ResolveRelative(c.s.BasePath, "{+topic}/subscriptions")
- urls += "?" + params.Encode()
- req, _ := http.NewRequest("GET", urls, body)
- googleapi.Expand(req.URL, map[string]string{
- "topic": c.topic,
- })
- req.Header.Set("User-Agent", c.s.userAgent())
- res, err := c.s.client.Do(req)
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, err
- }
- var ret *ListTopicSubscriptionsResponse
- if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Lists the name of the subscriptions for this topic.",
- // "httpMethod": "GET",
- // "id": "pubsub.projects.topics.subscriptions.list",
- // "parameterOrder": [
- // "topic"
- // ],
- // "parameters": {
- // "pageSize": {
- // "format": "int32",
- // "location": "query",
- // "type": "integer"
- // },
- // "pageToken": {
- // "location": "query",
- // "type": "string"
- // },
- // "topic": {
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "{+topic}/subscriptions",
- // "response": {
- // "$ref": "ListTopicSubscriptionsResponse"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/pubsub"
- // ]
- // }
-
-}
diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml
deleted file mode 100644
index 7715209..0000000
--- a/vendor/google.golang.org/appengine/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-language: go
-sudo: false
-
-go:
- - 1.4
-
-install:
- - go get -v -t -d google.golang.org/appengine/...
- - mkdir sdk
- - curl -o sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.24.zip"
- - unzip sdk.zip -d sdk
- - export APPENGINE_DEV_APPSERVER=$(pwd)/sdk/go_appengine/dev_appserver.py
-
-script:
- - go version
- - go test -v google.golang.org/appengine/...
- - go test -v -race google.golang.org/appengine/...
- - sdk/go_appengine/goapp test -v google.golang.org/appengine/...
diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE
deleted file mode 100644
index d645695..0000000
--- a/vendor/google.golang.org/appengine/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md
deleted file mode 100644
index 5ae34df..0000000
--- a/vendor/google.golang.org/appengine/README.md
+++ /dev/null
@@ -1,75 +0,0 @@
-# Go App Engine packages
-
-[](https://travis-ci.org/golang/appengine)
-
-This repository supports the Go runtime on App Engine,
-including both classic App Engine and Managed VMs.
-It provides APIs for interacting with App Engine services.
-Its canonical import path is `google.golang.org/appengine`.
-
-See https://cloud.google.com/appengine/docs/go/
-for more information.
-
-File issue reports and feature requests on the [Google App Engine issue
-tracker](https://code.google.com/p/googleappengine/issues/entry?template=Go%20defect).
-
-## Directory structure
-The top level directory of this repository is the `appengine` package. It
-contains the
-basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API
-packages are in subdirectories (e.g. `datastore`).
-
-There is an `internal` subdirectory that contains service protocol buffers,
-plus packages required for connectivity to make API calls. App Engine apps
-should not directly import any package under `internal`.
-
-## Updating a Go App Engine app
-
-This section describes how to update a traditional Go App Engine app to use
-these packages.
-
-### 1. Update YAML files (Managed VMs only)
-
-The `app.yaml` file (and YAML files for modules) should have these new lines added:
-```
-vm: true
-manual_scaling:
- instances: 1
-```
-See https://cloud.google.com/appengine/docs/go/modules/#Go_Instance_scaling_and_class for details.
-
-### 2. Update import paths
-
-The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`.
-You will need to update your code to use import paths starting with that; for instance,
-code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`.
-You can do that manually, or by running this command to recursively update all Go source files in the current directory:
-(may require GNU sed)
-```
-sed -i '/"appengine/{s,"appengine,"google.golang.org/appengine,;s,appengine_,appengine/,}' \
- $(find . -name '*.go')
-```
-
-### 3. Update code using deprecated, removed or modified APIs
-
-Most App Engine services are available with exactly the same API.
-A few APIs were cleaned up, and some are not available yet.
-This list summarises the differences:
-
-* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`.
-* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`.
-* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead.
-* `appengine.Datacenter` now takes a `context.Context` argument.
-* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels.
-* `delay.Call` now returns an error.
-* `search.FieldLoadSaver` now handles document metadata.
-* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the
- `context.Context` instead.
-* `aetest` no longer declares its own Context type, and uses the standard one instead.
-* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been
- deprecated and unused for a long time.
-* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature.
- Use `appengine.ModuleHostname`and `appengine.ModuleName` instead.
-* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated.
- Use [Google Cloud Storage](https://godoc.org/google.golang.org/cloud/storage) instead.
-* `appengine/socket` is not required on Managed VMs. Use the standard `net` package instead.
diff --git a/vendor/google.golang.org/appengine/aetest/doc.go b/vendor/google.golang.org/appengine/aetest/doc.go
deleted file mode 100644
index 86ce8c2..0000000
--- a/vendor/google.golang.org/appengine/aetest/doc.go
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
-Package aetest provides an API for running dev_appserver for use in tests.
-
-An example test file:
-
- package foo_test
-
- import (
- "testing"
-
- "google.golang.org/appengine/memcache"
- "google.golang.org/appengine/aetest"
- )
-
- func TestFoo(t *testing.T) {
- ctx, done, err := aetest.NewContext()
- if err != nil {
- t.Fatal(err)
- }
- defer done()
-
- it := &memcache.Item{
- Key: "some-key",
- Value: []byte("some-value"),
- }
- err = memcache.Set(ctx, it)
- if err != nil {
- t.Fatalf("Set err: %v", err)
- }
- it, err = memcache.Get(ctx, "some-key")
- if err != nil {
- t.Fatalf("Get err: %v; want no error", err)
- }
- if g, w := string(it.Value), "some-value" ; g != w {
- t.Errorf("retrieved Item.Value = %q, want %q", g, w)
- }
- }
-
-The environment variable APPENGINE_DEV_APPSERVER specifies the location of the
-dev_appserver.py executable to use. If unset, the system PATH is consulted.
-*/
-package aetest
diff --git a/vendor/google.golang.org/appengine/aetest/instance.go b/vendor/google.golang.org/appengine/aetest/instance.go
deleted file mode 100644
index a8f99d8..0000000
--- a/vendor/google.golang.org/appengine/aetest/instance.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package aetest
-
-import (
- "io"
- "net/http"
-
- "golang.org/x/net/context"
- "google.golang.org/appengine"
-)
-
-// Instance represents a running instance of the development API Server.
-type Instance interface {
- // Close kills the child api_server.py process, releasing its resources.
- io.Closer
- // NewRequest returns an *http.Request associated with this instance.
- NewRequest(method, urlStr string, body io.Reader) (*http.Request, error)
-}
-
-// Options is used to specify options when creating an Instance.
-type Options struct {
- // AppID specifies the App ID to use during tests.
- // By default, "testapp".
- AppID string
- // StronglyConsistentDatastore is whether the local datastore should be
- // strongly consistent. This will diverge from production behaviour.
- StronglyConsistentDatastore bool
-}
-
-// NewContext starts an instance of the development API server, and returns
-// a context that will route all API calls to that server, as well as a
-// closure that must be called when the Context is no longer required.
-func NewContext() (context.Context, func(), error) {
- inst, err := NewInstance(nil)
- if err != nil {
- return nil, nil, err
- }
- req, err := inst.NewRequest("GET", "/", nil)
- if err != nil {
- inst.Close()
- return nil, nil, err
- }
- ctx := appengine.NewContext(req)
- return ctx, func() {
- inst.Close()
- }, nil
-}
-
-// PrepareDevAppserver is a hook which, if set, will be called before the
-// dev_appserver.py is started, each time it is started. If aetest.NewContext
-// is invoked from the goapp test tool, this hook is unnecessary.
-var PrepareDevAppserver func() error
diff --git a/vendor/google.golang.org/appengine/aetest/instance_classic.go b/vendor/google.golang.org/appengine/aetest/instance_classic.go
deleted file mode 100644
index fbceaa5..0000000
--- a/vendor/google.golang.org/appengine/aetest/instance_classic.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// +build appengine
-
-package aetest
-
-import "appengine/aetest"
-
-// NewInstance launches a running instance of api_server.py which can be used
-// for multiple test Contexts that delegate all App Engine API calls to that
-// instance.
-// If opts is nil the default values are used.
-func NewInstance(opts *Options) (Instance, error) {
- aetest.PrepareDevAppserver = PrepareDevAppserver
- var aeOpts *aetest.Options
- if opts != nil {
- aeOpts = &aetest.Options{
- AppID: opts.AppID,
- StronglyConsistentDatastore: opts.StronglyConsistentDatastore,
- }
- }
- return aetest.NewInstance(aeOpts)
-}
diff --git a/vendor/google.golang.org/appengine/aetest/instance_vm.go b/vendor/google.golang.org/appengine/aetest/instance_vm.go
deleted file mode 100644
index ee81480..0000000
--- a/vendor/google.golang.org/appengine/aetest/instance_vm.go
+++ /dev/null
@@ -1,276 +0,0 @@
-// +build !appengine
-
-package aetest
-
-import (
- "bufio"
- "crypto/rand"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "os"
- "os/exec"
- "path/filepath"
- "regexp"
- "time"
-
- "golang.org/x/net/context"
- "google.golang.org/appengine/internal"
-)
-
-// NewInstance launches a running instance of api_server.py which can be used
-// for multiple test Contexts that delegate all App Engine API calls to that
-// instance.
-// If opts is nil the default values are used.
-func NewInstance(opts *Options) (Instance, error) {
- i := &instance{
- opts: opts,
- appID: "testapp",
- }
- if opts != nil && opts.AppID != "" {
- i.appID = opts.AppID
- }
- if err := i.startChild(); err != nil {
- return nil, err
- }
- return i, nil
-}
-
-func newSessionID() string {
- var buf [16]byte
- io.ReadFull(rand.Reader, buf[:])
- return fmt.Sprintf("%x", buf[:])
-}
-
-// instance implements the Instance interface.
-type instance struct {
- opts *Options
- child *exec.Cmd
- apiURL *url.URL // base URL of API HTTP server
- adminURL string // base URL of admin HTTP server
- appDir string
- appID string
- relFuncs []func() // funcs to release any associated contexts
-}
-
-// NewRequest returns an *http.Request associated with this instance.
-func (i *instance) NewRequest(method, urlStr string, body io.Reader) (*http.Request, error) {
- req, err := http.NewRequest(method, urlStr, body)
- if err != nil {
- return nil, err
- }
-
- // Associate this request.
- release := internal.RegisterTestRequest(req, i.apiURL, func(ctx context.Context) context.Context {
- ctx = internal.WithAppIDOverride(ctx, "dev~"+i.appID)
- return ctx
- })
- i.relFuncs = append(i.relFuncs, release)
-
- return req, nil
-}
-
-// Close kills the child api_server.py process, releasing its resources.
-func (i *instance) Close() (err error) {
- for _, rel := range i.relFuncs {
- rel()
- }
- i.relFuncs = nil
- if i.child == nil {
- return nil
- }
- defer func() {
- i.child = nil
- err1 := os.RemoveAll(i.appDir)
- if err == nil {
- err = err1
- }
- }()
-
- if p := i.child.Process; p != nil {
- errc := make(chan error, 1)
- go func() {
- errc <- i.child.Wait()
- }()
-
- // Call the quit handler on the admin server.
- res, err := http.Get(i.adminURL + "/quit")
- if err != nil {
- p.Kill()
- return fmt.Errorf("unable to call /quit handler: %v", err)
- }
- res.Body.Close()
-
- select {
- case <-time.After(15 * time.Second):
- p.Kill()
- return errors.New("timeout killing child process")
- case err = <-errc:
- // Do nothing.
- }
- }
- return
-}
-
-func fileExists(path string) bool {
- _, err := os.Stat(path)
- return err == nil
-}
-
-func findPython() (path string, err error) {
- for _, name := range []string{"python2.7", "python"} {
- path, err = exec.LookPath(name)
- if err == nil {
- return
- }
- }
- return
-}
-
-func findDevAppserver() (string, error) {
- if p := os.Getenv("APPENGINE_DEV_APPSERVER"); p != "" {
- if fileExists(p) {
- return p, nil
- }
- return "", fmt.Errorf("invalid APPENGINE_DEV_APPSERVER environment variable; path %q doesn't exist", p)
- }
- return exec.LookPath("dev_appserver.py")
-}
-
-var apiServerAddrRE = regexp.MustCompile(`Starting API server at: (\S+)`)
-var adminServerAddrRE = regexp.MustCompile(`Starting admin server at: (\S+)`)
-
-func (i *instance) startChild() (err error) {
- if PrepareDevAppserver != nil {
- if err := PrepareDevAppserver(); err != nil {
- return err
- }
- }
- python, err := findPython()
- if err != nil {
- return fmt.Errorf("Could not find python interpreter: %v", err)
- }
- devAppserver, err := findDevAppserver()
- if err != nil {
- return fmt.Errorf("Could not find dev_appserver.py: %v", err)
- }
-
- i.appDir, err = ioutil.TempDir("", "appengine-aetest")
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- os.RemoveAll(i.appDir)
- }
- }()
- err = os.Mkdir(filepath.Join(i.appDir, "app"), 0755)
- if err != nil {
- return err
- }
- err = ioutil.WriteFile(filepath.Join(i.appDir, "app", "app.yaml"), []byte(i.appYAML()), 0644)
- if err != nil {
- return err
- }
- err = ioutil.WriteFile(filepath.Join(i.appDir, "app", "stubapp.go"), []byte(appSource), 0644)
- if err != nil {
- return err
- }
-
- appserverArgs := []string{
- devAppserver,
- "--port=0",
- "--api_port=0",
- "--admin_port=0",
- "--automatic_restart=false",
- "--skip_sdk_update_check=true",
- "--clear_datastore=true",
- "--clear_search_indexes=true",
- "--datastore_path", filepath.Join(i.appDir, "datastore"),
- }
- if i.opts != nil && i.opts.StronglyConsistentDatastore {
- appserverArgs = append(appserverArgs, "--datastore_consistency_policy=consistent")
- }
- appserverArgs = append(appserverArgs, filepath.Join(i.appDir, "app"))
-
- i.child = exec.Command(python,
- appserverArgs...,
- )
- i.child.Stdout = os.Stdout
- var stderr io.Reader
- stderr, err = i.child.StderrPipe()
- if err != nil {
- return err
- }
- stderr = io.TeeReader(stderr, os.Stderr)
- if err = i.child.Start(); err != nil {
- return err
- }
-
- // Read stderr until we have read the URLs of the API server and admin interface.
- errc := make(chan error, 1)
- go func() {
- s := bufio.NewScanner(stderr)
- for s.Scan() {
- if match := apiServerAddrRE.FindStringSubmatch(s.Text()); match != nil {
- u, err := url.Parse(match[1])
- if err != nil {
- errc <- fmt.Errorf("failed to parse API URL %q: %v", match[1], err)
- return
- }
- i.apiURL = u
- }
- if match := adminServerAddrRE.FindStringSubmatch(s.Text()); match != nil {
- i.adminURL = match[1]
- }
- if i.adminURL != "" && i.apiURL != nil {
- break
- }
- }
- errc <- s.Err()
- }()
-
- select {
- case <-time.After(15 * time.Second):
- if p := i.child.Process; p != nil {
- p.Kill()
- }
- return errors.New("timeout starting child process")
- case err := <-errc:
- if err != nil {
- return fmt.Errorf("error reading child process stderr: %v", err)
- }
- }
- if i.adminURL == "" {
- return errors.New("unable to find admin server URL")
- }
- if i.apiURL == nil {
- return errors.New("unable to find API server URL")
- }
- return nil
-}
-
-func (i *instance) appYAML() string {
- return fmt.Sprintf(appYAMLTemplate, i.appID)
-}
-
-const appYAMLTemplate = `
-application: %s
-version: 1
-runtime: go
-api_version: go1
-vm: true
-
-handlers:
-- url: /.*
- script: _go_app
-`
-
-const appSource = `
-package main
-import "google.golang.org/appengine"
-func main() { appengine.Main() }
-`
diff --git a/vendor/google.golang.org/appengine/aetest/user.go b/vendor/google.golang.org/appengine/aetest/user.go
deleted file mode 100644
index bf9266f..0000000
--- a/vendor/google.golang.org/appengine/aetest/user.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package aetest
-
-import (
- "hash/crc32"
- "net/http"
- "strconv"
-
- "google.golang.org/appengine/user"
-)
-
-// Login causes the provided Request to act as though issued by the given user.
-func Login(u *user.User, req *http.Request) {
- req.Header.Set("X-AppEngine-User-Email", u.Email)
- id := u.ID
- if id == "" {
- id = strconv.Itoa(int(crc32.Checksum([]byte(u.Email), crc32.IEEETable)))
- }
- req.Header.Set("X-AppEngine-User-Id", id)
- req.Header.Set("X-AppEngine-User-Federated-Identity", u.Email)
- req.Header.Set("X-AppEngine-User-Federated-Provider", u.FederatedProvider)
- if u.Admin {
- req.Header.Set("X-AppEngine-User-Is-Admin", "1")
- } else {
- req.Header.Set("X-AppEngine-User-Is-Admin", "0")
- }
-}
-
-// Logout causes the provided Request to act as though issued by a logged-out
-// user.
-func Logout(req *http.Request) {
- req.Header.Del("X-AppEngine-User-Email")
- req.Header.Del("X-AppEngine-User-Id")
- req.Header.Del("X-AppEngine-User-Is-Admin")
- req.Header.Del("X-AppEngine-User-Federated-Identity")
- req.Header.Del("X-AppEngine-User-Federated-Provider")
-}
diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go
deleted file mode 100644
index 52e6ee3..0000000
--- a/vendor/google.golang.org/appengine/appengine.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// Package appengine provides basic functionality for Google App Engine.
-//
-// For more information on how to write Go apps for Google App Engine, see:
-// https://cloud.google.com/appengine/docs/go/
-package appengine
-
-import (
- "net/http"
-
- "github.com/golang/protobuf/proto"
- "golang.org/x/net/context"
-
- "google.golang.org/appengine/internal"
-)
-
-// IsDevAppServer reports whether the App Engine app is running in the
-// development App Server.
-func IsDevAppServer() bool {
- return internal.IsDevAppServer()
-}
-
-// NewContext returns a context for an in-flight HTTP request.
-// This function is cheap.
-func NewContext(req *http.Request) context.Context {
- return WithContext(context.Background(), req)
-}
-
-// WithContext returns a copy of the parent context
-// and associates it with an in-flight HTTP request.
-// This function is cheap.
-func WithContext(parent context.Context, req *http.Request) context.Context {
- return internal.WithContext(parent, req)
-}
-
-// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call.
-
-// BlobKey is a key for a blobstore blob.
-//
-// Conceptually, this type belongs in the blobstore package, but it lives in
-// the appengine package to avoid a circular dependency: blobstore depends on
-// datastore, and datastore needs to refer to the BlobKey type.
-type BlobKey string
-
-// GeoPoint represents a location as latitude/longitude in degrees.
-type GeoPoint struct {
- Lat, Lng float64
-}
-
-// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
-func (g GeoPoint) Valid() bool {
- return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
-}
-
-// APICallFunc defines a function type for handling an API call.
-// See WithCallOverride.
-type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error
-
-// WithCallOverride returns a copy of the parent context
-// that will cause API calls to invoke f instead of their normal operation.
-//
-// This is intended for advanced users only.
-func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context {
- return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f))
-}
-
-// APICall performs an API call.
-//
-// This is not intended for general use; it is exported for use in conjunction
-// with WithAPICallFunc.
-func APICall(ctx context.Context, service, method string, in, out proto.Message) error {
- return internal.Call(ctx, service, method, in, out)
-}
diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go
deleted file mode 100644
index 2f77590..0000000
--- a/vendor/google.golang.org/appengine/appengine_vm.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package appengine
-
-import (
- "golang.org/x/net/context"
-
- "google.golang.org/appengine/internal"
-)
-
-// The comment below must not be changed.
-// It is used by go-app-builder to recognise that this package has
-// the Main function to use in the synthetic main.
-// The gophers party all night; the rabbits provide the beats.
-
-// Main is the principal entry point for a Managed VMs app.
-// It installs a trivial health checker if one isn't already registered,
-// and starts listening on port 8080 (overridden by the $PORT environment
-// variable).
-//
-// See https://cloud.google.com/appengine/docs/managed-vms/custom-runtimes#health_check_requests
-// for details on how to do your own health checking.
-//
-// Main never returns.
-//
-// Main is designed so that the app's main package looks like this:
-//
-// package main
-//
-// import (
-// "google.golang.org/appengine"
-//
-// _ "myapp/package0"
-// _ "myapp/package1"
-// )
-//
-// func main() {
-// appengine.Main()
-// }
-//
-// The "myapp/packageX" packages are expected to register HTTP handlers
-// in their init functions.
-func Main() {
- internal.Main()
-}
-
-// BackgroundContext returns a context not associated with a request.
-// This should only be used when not servicing a request.
-// This only works on Managed VMs.
-func BackgroundContext() context.Context {
- return internal.BackgroundContext()
-}
diff --git a/vendor/google.golang.org/appengine/blobstore/blobstore.go b/vendor/google.golang.org/appengine/blobstore/blobstore.go
deleted file mode 100644
index e9f3df9..0000000
--- a/vendor/google.golang.org/appengine/blobstore/blobstore.go
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// Package blobstore provides a client for App Engine's persistent blob
-// storage service.
-package blobstore
-
-import (
- "bufio"
- "encoding/base64"
- "fmt"
- "io"
- "io/ioutil"
- "mime"
- "mime/multipart"
- "net/http"
- "net/textproto"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "github.com/golang/protobuf/proto"
- "golang.org/x/net/context"
-
- "google.golang.org/appengine"
- "google.golang.org/appengine/datastore"
- "google.golang.org/appengine/internal"
-
- basepb "google.golang.org/appengine/internal/base"
- blobpb "google.golang.org/appengine/internal/blobstore"
-)
-
-const (
- blobInfoKind = "__BlobInfo__"
- blobFileIndexKind = "__BlobFileIndex__"
- zeroKey = appengine.BlobKey("")
-)
-
-// BlobInfo is the blob metadata that is stored in the datastore.
-// Filename may be empty.
-type BlobInfo struct {
- BlobKey appengine.BlobKey
- ContentType string `datastore:"content_type"`
- CreationTime time.Time `datastore:"creation"`
- Filename string `datastore:"filename"`
- Size int64 `datastore:"size"`
- MD5 string `datastore:"md5_hash"`
-
- // ObjectName is the Google Cloud Storage name for this blob.
- ObjectName string `datastore:"gs_object_name"`
-}
-
-// isErrFieldMismatch returns whether err is a datastore.ErrFieldMismatch.
-//
-// The blobstore stores blob metadata in the datastore. When loading that
-// metadata, it may contain fields that we don't care about. datastore.Get will
-// return datastore.ErrFieldMismatch in that case, so we ignore that specific
-// error.
-func isErrFieldMismatch(err error) bool {
- _, ok := err.(*datastore.ErrFieldMismatch)
- return ok
-}
-
-// Stat returns the BlobInfo for a provided blobKey. If no blob was found for
-// that key, Stat returns datastore.ErrNoSuchEntity.
-func Stat(c context.Context, blobKey appengine.BlobKey) (*BlobInfo, error) {
- c, _ = appengine.Namespace(c, "") // Blobstore is always in the empty string namespace
- dskey := datastore.NewKey(c, blobInfoKind, string(blobKey), 0, nil)
- bi := &BlobInfo{
- BlobKey: blobKey,
- }
- if err := datastore.Get(c, dskey, bi); err != nil && !isErrFieldMismatch(err) {
- return nil, err
- }
- return bi, nil
-}
-
-// Send sets the headers on response to instruct App Engine to send a blob as
-// the response body. This is more efficient than reading and writing it out
-// manually and isn't subject to normal response size limits.
-func Send(response http.ResponseWriter, blobKey appengine.BlobKey) {
- hdr := response.Header()
- hdr.Set("X-AppEngine-BlobKey", string(blobKey))
-
- if hdr.Get("Content-Type") == "" {
- // This value is known to dev_appserver to mean automatic.
- // In production this is remapped to the empty value which
- // means automatic.
- hdr.Set("Content-Type", "application/vnd.google.appengine.auto")
- }
-}
-
-// UploadURL creates an upload URL for the form that the user will
-// fill out, passing the application path to load when the POST of the
-// form is completed. These URLs expire and should not be reused. The
-// opts parameter may be nil.
-func UploadURL(c context.Context, successPath string, opts *UploadURLOptions) (*url.URL, error) {
- req := &blobpb.CreateUploadURLRequest{
- SuccessPath: proto.String(successPath),
- }
- if opts != nil {
- if n := opts.MaxUploadBytes; n != 0 {
- req.MaxUploadSizeBytes = &n
- }
- if n := opts.MaxUploadBytesPerBlob; n != 0 {
- req.MaxUploadSizePerBlobBytes = &n
- }
- if s := opts.StorageBucket; s != "" {
- req.GsBucketName = &s
- }
- }
- res := &blobpb.CreateUploadURLResponse{}
- if err := internal.Call(c, "blobstore", "CreateUploadURL", req, res); err != nil {
- return nil, err
- }
- return url.Parse(*res.Url)
-}
-
-// UploadURLOptions are the options to create an upload URL.
-type UploadURLOptions struct {
- MaxUploadBytes int64 // optional
- MaxUploadBytesPerBlob int64 // optional
-
- // StorageBucket specifies the Google Cloud Storage bucket in which
- // to store the blob.
- // This is required if you use Cloud Storage instead of Blobstore.
- // Your application must have permission to write to the bucket.
- // You may optionally specify a bucket name and path in the format
- // "bucket_name/path", in which case the included path will be the
- // prefix of the uploaded object's name.
- StorageBucket string
-}
-
-// Delete deletes a blob.
-func Delete(c context.Context, blobKey appengine.BlobKey) error {
- return DeleteMulti(c, []appengine.BlobKey{blobKey})
-}
-
-// DeleteMulti deletes multiple blobs.
-func DeleteMulti(c context.Context, blobKey []appengine.BlobKey) error {
- s := make([]string, len(blobKey))
- for i, b := range blobKey {
- s[i] = string(b)
- }
- req := &blobpb.DeleteBlobRequest{
- BlobKey: s,
- }
- res := &basepb.VoidProto{}
- if err := internal.Call(c, "blobstore", "DeleteBlob", req, res); err != nil {
- return err
- }
- return nil
-}
-
-func errorf(format string, args ...interface{}) error {
- return fmt.Errorf("blobstore: "+format, args...)
-}
-
-// ParseUpload parses the synthetic POST request that your app gets from
-// App Engine after a user's successful upload of blobs. Given the request,
-// ParseUpload returns a map of the blobs received (keyed by HTML form
-// element name) and other non-blob POST parameters.
-func ParseUpload(req *http.Request) (blobs map[string][]*BlobInfo, other url.Values, err error) {
- _, params, err := mime.ParseMediaType(req.Header.Get("Content-Type"))
- if err != nil {
- return nil, nil, err
- }
- boundary := params["boundary"]
- if boundary == "" {
- return nil, nil, errorf("did not find MIME multipart boundary")
- }
-
- blobs = make(map[string][]*BlobInfo)
- other = make(url.Values)
-
- mreader := multipart.NewReader(io.MultiReader(req.Body, strings.NewReader("\r\n\r\n")), boundary)
- for {
- part, perr := mreader.NextPart()
- if perr == io.EOF {
- break
- }
- if perr != nil {
- return nil, nil, errorf("error reading next mime part with boundary %q (len=%d): %v",
- boundary, len(boundary), perr)
- }
-
- bi := &BlobInfo{}
- ctype, params, err := mime.ParseMediaType(part.Header.Get("Content-Disposition"))
- if err != nil {
- return nil, nil, err
- }
- bi.Filename = params["filename"]
- formKey := params["name"]
-
- ctype, params, err = mime.ParseMediaType(part.Header.Get("Content-Type"))
- if err != nil {
- return nil, nil, err
- }
- bi.BlobKey = appengine.BlobKey(params["blob-key"])
- if ctype != "message/external-body" || bi.BlobKey == "" {
- if formKey != "" {
- slurp, serr := ioutil.ReadAll(part)
- if serr != nil {
- return nil, nil, errorf("error reading %q MIME part", formKey)
- }
- other[formKey] = append(other[formKey], string(slurp))
- }
- continue
- }
-
- // App Engine sends a MIME header as the body of each MIME part.
- tp := textproto.NewReader(bufio.NewReader(part))
- header, mimeerr := tp.ReadMIMEHeader()
- if mimeerr != nil {
- return nil, nil, mimeerr
- }
- bi.Size, err = strconv.ParseInt(header.Get("Content-Length"), 10, 64)
- if err != nil {
- return nil, nil, err
- }
- bi.ContentType = header.Get("Content-Type")
-
- // Parse the time from the MIME header like:
- // X-AppEngine-Upload-Creation: 2011-03-15 21:38:34.712136
- createDate := header.Get("X-AppEngine-Upload-Creation")
- if createDate == "" {
- return nil, nil, errorf("expected to find an X-AppEngine-Upload-Creation header")
- }
- bi.CreationTime, err = time.Parse("2006-01-02 15:04:05.000000", createDate)
- if err != nil {
- return nil, nil, errorf("error parsing X-AppEngine-Upload-Creation: %s", err)
- }
-
- if hdr := header.Get("Content-MD5"); hdr != "" {
- md5, err := base64.URLEncoding.DecodeString(hdr)
- if err != nil {
- return nil, nil, errorf("bad Content-MD5 %q: %v", hdr, err)
- }
- bi.MD5 = string(md5)
- }
-
- // If the GCS object name was provided, record it.
- bi.ObjectName = header.Get("X-AppEngine-Cloud-Storage-Object")
-
- blobs[formKey] = append(blobs[formKey], bi)
- }
- return
-}
-
-// Reader is a blob reader.
-type Reader interface {
- io.Reader
- io.ReaderAt
- io.Seeker
-}
-
-// NewReader returns a reader for a blob. It always succeeds; if the blob does
-// not exist then an error will be reported upon first read.
-func NewReader(c context.Context, blobKey appengine.BlobKey) Reader {
- return openBlob(c, blobKey)
-}
-
-// BlobKeyForFile returns a BlobKey for a Google Storage file.
-// The filename should be of the form "/gs/bucket_name/object_name".
-func BlobKeyForFile(c context.Context, filename string) (appengine.BlobKey, error) {
- req := &blobpb.CreateEncodedGoogleStorageKeyRequest{
- Filename: &filename,
- }
- res := &blobpb.CreateEncodedGoogleStorageKeyResponse{}
- if err := internal.Call(c, "blobstore", "CreateEncodedGoogleStorageKey", req, res); err != nil {
- return "", err
- }
- return appengine.BlobKey(*res.BlobKey), nil
-}
diff --git a/vendor/google.golang.org/appengine/blobstore/read.go b/vendor/google.golang.org/appengine/blobstore/read.go
deleted file mode 100644
index 578b1f5..0000000
--- a/vendor/google.golang.org/appengine/blobstore/read.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2012 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package blobstore
-
-import (
- "errors"
- "fmt"
- "io"
- "os"
- "sync"
-
- "github.com/golang/protobuf/proto"
- "golang.org/x/net/context"
-
- "google.golang.org/appengine"
- "google.golang.org/appengine/internal"
-
- blobpb "google.golang.org/appengine/internal/blobstore"
-)
-
-// openBlob returns a reader for a blob. It always succeeds; if the blob does
-// not exist then an error will be reported upon first read.
-func openBlob(c context.Context, blobKey appengine.BlobKey) Reader {
- return &reader{
- c: c,
- blobKey: blobKey,
- }
-}
-
-const readBufferSize = 256 * 1024
-
-// reader is a blob reader. It implements the Reader interface.
-type reader struct {
- c context.Context
-
- // Either blobKey or filename is set:
- blobKey appengine.BlobKey
- filename string
-
- closeFunc func() // is nil if unavailable or already closed.
-
- // buf is the read buffer. r is how much of buf has been read.
- // off is the offset of buf[0] relative to the start of the blob.
- // An invariant is 0 <= r && r <= len(buf).
- // Reads that don't require an RPC call will increment r but not off.
- // Seeks may modify r without discarding the buffer, but only if the
- // invariant can be maintained.
- mu sync.Mutex
- buf []byte
- r int
- off int64
-}
-
-func (r *reader) Close() error {
- if f := r.closeFunc; f != nil {
- f()
- }
- r.closeFunc = nil
- return nil
-}
-
-func (r *reader) Read(p []byte) (int, error) {
- if len(p) == 0 {
- return 0, nil
- }
- r.mu.Lock()
- defer r.mu.Unlock()
- if r.r == len(r.buf) {
- if err := r.fetch(r.off + int64(r.r)); err != nil {
- return 0, err
- }
- }
- n := copy(p, r.buf[r.r:])
- r.r += n
- return n, nil
-}
-
-func (r *reader) ReadAt(p []byte, off int64) (int, error) {
- if len(p) == 0 {
- return 0, nil
- }
- r.mu.Lock()
- defer r.mu.Unlock()
- // Convert relative offsets to absolute offsets.
- ab0 := r.off + int64(r.r)
- ab1 := r.off + int64(len(r.buf))
- ap0 := off
- ap1 := off + int64(len(p))
- // Check if we can satisfy the read entirely out of the existing buffer.
- if r.off <= ap0 && ap1 <= ab1 {
- // Convert off from an absolute offset to a relative offset.
- rp0 := int(ap0 - r.off)
- return copy(p, r.buf[rp0:]), nil
- }
- // Restore the original Read/Seek offset after ReadAt completes.
- defer r.seek(ab0)
- // Repeatedly fetch and copy until we have filled p.
- n := 0
- for len(p) > 0 {
- if err := r.fetch(off + int64(n)); err != nil {
- return n, err
- }
- r.r = copy(p, r.buf)
- n += r.r
- p = p[r.r:]
- }
- return n, nil
-}
-
-func (r *reader) Seek(offset int64, whence int) (ret int64, err error) {
- r.mu.Lock()
- defer r.mu.Unlock()
- switch whence {
- case os.SEEK_SET:
- ret = offset
- case os.SEEK_CUR:
- ret = r.off + int64(r.r) + offset
- case os.SEEK_END:
- return 0, errors.New("seeking relative to the end of a blob isn't supported")
- default:
- return 0, fmt.Errorf("invalid Seek whence value: %d", whence)
- }
- if ret < 0 {
- return 0, errors.New("negative Seek offset")
- }
- return r.seek(ret)
-}
-
-// fetch fetches readBufferSize bytes starting at the given offset. On success,
-// the data is saved as r.buf.
-func (r *reader) fetch(off int64) error {
- req := &blobpb.FetchDataRequest{
- BlobKey: proto.String(string(r.blobKey)),
- StartIndex: proto.Int64(off),
- EndIndex: proto.Int64(off + readBufferSize - 1), // EndIndex is inclusive.
- }
- res := &blobpb.FetchDataResponse{}
- if err := internal.Call(r.c, "blobstore", "FetchData", req, res); err != nil {
- return err
- }
- if len(res.Data) == 0 {
- return io.EOF
- }
- r.buf, r.r, r.off = res.Data, 0, off
- return nil
-}
-
-// seek seeks to the given offset with an effective whence equal to SEEK_SET.
-// It discards the read buffer if the invariant cannot be maintained.
-func (r *reader) seek(off int64) (int64, error) {
- delta := off - r.off
- if delta >= 0 && delta < int64(len(r.buf)) {
- r.r = int(delta)
- return off, nil
- }
- r.buf, r.r, r.off = nil, 0, off
- return off, nil
-}
diff --git a/vendor/google.golang.org/appengine/capability/capability.go b/vendor/google.golang.org/appengine/capability/capability.go
deleted file mode 100644
index 26edad7..0000000
--- a/vendor/google.golang.org/appengine/capability/capability.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-/*
-Package capability exposes information about outages and scheduled downtime
-for specific API capabilities.
-
-This package does not work on Managed VMs.
-
-Example:
- if !capability.Enabled(c, "datastore_v3", "write") {
- // show user a different page
- }
-*/
-package capability
-
-import (
- "golang.org/x/net/context"
-
- "google.golang.org/appengine/internal"
- "google.golang.org/appengine/log"
-
- pb "google.golang.org/appengine/internal/capability"
-)
-
-// Enabled returns whether an API's capabilities are enabled.
-// The wildcard "*" capability matches every capability of an API.
-// If the underlying RPC fails (if the package is unknown, for example),
-// false is returned and information is written to the application log.
-func Enabled(ctx context.Context, api, capability string) bool {
- req := &pb.IsEnabledRequest{
- Package: &api,
- Capability: []string{capability},
- }
- res := &pb.IsEnabledResponse{}
- if err := internal.Call(ctx, "capability_service", "IsEnabled", req, res); err != nil {
- log.Warningf(ctx, "capability.Enabled: RPC failed: %v", err)
- return false
- }
- switch *res.SummaryStatus {
- case pb.IsEnabledResponse_ENABLED,
- pb.IsEnabledResponse_SCHEDULED_FUTURE,
- pb.IsEnabledResponse_SCHEDULED_NOW:
- return true
- case pb.IsEnabledResponse_UNKNOWN:
- log.Errorf(ctx, "capability.Enabled: unknown API capability %s/%s", api, capability)
- return false
- default:
- return false
- }
-}
diff --git a/vendor/google.golang.org/appengine/channel/channel.go b/vendor/google.golang.org/appengine/channel/channel.go
deleted file mode 100644
index 004f5dd..0000000
--- a/vendor/google.golang.org/appengine/channel/channel.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-/*
-Package channel implements the server side of App Engine's Channel API.
-
-Create creates a new channel associated with the given clientID,
-which must be unique to the client that will use the returned token.
-
- token, err := channel.Create(c, "player1")
- if err != nil {
- // handle error
- }
- // return token to the client in an HTTP response
-
-Send sends a message to the client over the channel identified by clientID.
-
- channel.Send(c, "player1", "Game over!")
-*/
-package channel
-
-import (
- "encoding/json"
-
- "golang.org/x/net/context"
-
- "google.golang.org/appengine"
- "google.golang.org/appengine/internal"
- basepb "google.golang.org/appengine/internal/base"
- pb "google.golang.org/appengine/internal/channel"
-)
-
-// Create creates a channel and returns a token for use by the client.
-// The clientID is an application-provided string used to identify the client.
-func Create(c context.Context, clientID string) (token string, err error) {
- req := &pb.CreateChannelRequest{
- ApplicationKey: &clientID,
- }
- resp := &pb.CreateChannelResponse{}
- err = internal.Call(c, service, "CreateChannel", req, resp)
- token = resp.GetToken()
- return token, remapError(err)
-}
-
-// Send sends a message on the channel associated with clientID.
-func Send(c context.Context, clientID, message string) error {
- req := &pb.SendMessageRequest{
- ApplicationKey: &clientID,
- Message: &message,
- }
- resp := &basepb.VoidProto{}
- return remapError(internal.Call(c, service, "SendChannelMessage", req, resp))
-}
-
-// SendJSON is a helper function that sends a JSON-encoded value
-// on the channel associated with clientID.
-func SendJSON(c context.Context, clientID string, value interface{}) error {
- m, err := json.Marshal(value)
- if err != nil {
- return err
- }
- return Send(c, clientID, string(m))
-}
-
-// remapError fixes any APIError referencing "xmpp" into one referencing "channel".
-func remapError(err error) error {
- if e, ok := err.(*internal.APIError); ok {
- if e.Service == "xmpp" {
- e.Service = "channel"
- }
- }
- return err
-}
-
-var service = "xmpp" // prod
-
-func init() {
- if appengine.IsDevAppServer() {
- service = "channel" // dev
- }
- internal.RegisterErrorCodeMap("channel", pb.ChannelServiceError_ErrorCode_name)
-}
diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql.go
deleted file mode 100644
index 795fba1..0000000
--- a/vendor/google.golang.org/appengine/cloudsql/cloudsql.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2013 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-/*
-Package cloudsql exposes access to Google Cloud SQL databases.
-
-This package does not work on Managed VMs.
-
-This package is intended for MySQL drivers to make App Engine-specific
-connections. Applications should use this package through database/sql:
-Select a pure Go MySQL driver that supports this package, and use sql.Open
-with protocol "cloudsql" and an address of the Cloud SQL instance.
-
-A Go MySQL driver that has been tested to work well with Cloud SQL
-is the go-sql-driver:
- import "database/sql"
- import _ "github.com/go-sql-driver/mysql"
-
- db, err := sql.Open("mysql", "user@cloudsql(project-id:instance-name)/dbname")
-
-
-Another driver that works well with Cloud SQL is the mymysql driver:
- import "database/sql"
- import _ "github.com/ziutek/mymysql/godrv"
-
- db, err := sql.Open("mymysql", "cloudsql:instance-name*dbname/user/password")
-
-
-Using either of these drivers, you can perform a standard SQL query.
-This example assumes there is a table named 'users' with
-columns 'first_name' and 'last_name':
-
- rows, err := db.Query("SELECT first_name, last_name FROM users")
- if err != nil {
- log.Errorf(ctx, "db.Query: %v", err)
- }
- defer rows.Close()
-
- for rows.Next() {
- var firstName string
- var lastName string
- if err := rows.Scan(&firstName, &lastName); err != nil {
- log.Errorf(ctx, "rows.Scan: %v", err)
- continue
- }
- log.Infof(ctx, "First: %v - Last: %v", firstName, lastName)
- }
- if err := rows.Err(); err != nil {
- log.Errorf(ctx, "Row error: %v", err)
- }
-*/
-package cloudsql
-
-import (
- "net"
-)
-
-// Dial connects to the named Cloud SQL instance.
-func Dial(instance string) (net.Conn, error) {
- return connect(instance)
-}
diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go
deleted file mode 100644
index af62dba..0000000
--- a/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2013 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package cloudsql
-
-import (
- "net"
-
- "appengine/cloudsql"
-)
-
-func connect(instance string) (net.Conn, error) {
- return cloudsql.Dial(instance)
-}
diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go
deleted file mode 100644
index c3f76f2..0000000
--- a/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2013 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package cloudsql
-
-import (
- "errors"
- "net"
-)
-
-func connect(instance string) (net.Conn, error) {
- return nil, errors.New("cloudsql: not supported in Managed VMs")
-}
diff --git a/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go b/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go
deleted file mode 100644
index 9080ce2..0000000
--- a/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go
+++ /dev/null
@@ -1,342 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// Program aebundler turns a Go app into a fully self-contained tar file.
-// The app and its subdirectories (if any) are placed under "."
-// and the dependencies from $GOPATH are placed under ./_gopath/src.
-// A main func is synthesized if one does not exist.
-//
-// A sample Dockerfile to be used with this bundler could look like this:
-// FROM gcr.io/google_appengine/go-compat
-// ADD . /app
-// RUN GOPATH=/app/_gopath go build -tags appenginevm -o /app/_ah/exe
-package main
-
-import (
- "archive/tar"
- "flag"
- "fmt"
- "go/ast"
- "go/build"
- "go/parser"
- "go/token"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
-)
-
-var (
- output = flag.String("o", "", "name of output tar file or '-' for stdout")
- rootDir = flag.String("root", ".", "directory name of application root")
- vm = flag.Bool("vm", true, "bundle a Managed VM app")
-
- skipFiles = map[string]bool{
- ".git": true,
- ".gitconfig": true,
- ".hg": true,
- ".travis.yml": true,
- }
-)
-
-const (
- newMain = `package main
-import "google.golang.org/appengine"
-func main() {
- appengine.Main()
-}
-`
-)
-
-func usage() {
- fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
- fmt.Fprintf(os.Stderr, "\t%s -o
- {{with .Email}}You are currently logged in as {{.}}.{{end}}
- {{with .Login}}Sign in{{end}}
- {{with .Logout}}Sign out{{end}}
-
- {{with .Author}}{{.}}{{else}}An anonymous person{{end}}
- on {{.Date.Format "3:04pm, Mon 2 Jan"}}
- wrote {{.Content}}
-
-Hello, World! 세상아 안녕! -
- --This instance has been running for {{.}}. -
- - -`)) diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go deleted file mode 100644 index 16d0772..0000000 --- a/vendor/google.golang.org/appengine/errors.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// This file provides error functions for common API failure modes. - -package appengine - -import ( - "fmt" - - "google.golang.org/appengine/internal" -) - -// IsOverQuota reports whether err represents an API call failure -// due to insufficient available quota. -func IsOverQuota(err error) bool { - callErr, ok := err.(*internal.CallError) - return ok && callErr.Code == 4 -} - -// MultiError is returned by batch operations when there are errors with -// particular elements. Errors will be in a one-to-one correspondence with -// the input elements; successful elements will have a nil entry. -type MultiError []error - -func (m MultiError) Error() string { - s, n := "", 0 - for _, e := range m { - if e != nil { - if n == 0 { - s = e.Error() - } - n++ - } - } - switch n { - case 0: - return "(0 errors)" - case 1: - return s - case 2: - return s + " (and 1 other error)" - } - return fmt.Sprintf("%s (and %d other errors)", s, n-1) -} diff --git a/vendor/google.golang.org/appengine/file/file.go b/vendor/google.golang.org/appengine/file/file.go deleted file mode 100644 index c3cd58b..0000000 --- a/vendor/google.golang.org/appengine/file/file.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package file provides helper functions for using Google Cloud Storage. 
-package file - -import ( - "fmt" - - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - aipb "google.golang.org/appengine/internal/app_identity" -) - -// DefaultBucketName returns the name of this application's -// default Google Cloud Storage bucket. -func DefaultBucketName(c context.Context) (string, error) { - req := &aipb.GetDefaultGcsBucketNameRequest{} - res := &aipb.GetDefaultGcsBucketNameResponse{} - - err := internal.Call(c, "app_identity_service", "GetDefaultGcsBucketName", req, res) - if err != nil { - return "", fmt.Errorf("file: no default bucket name returned in RPC response: %v", res) - } - return res.GetDefaultGcsBucketName(), nil -} diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go deleted file mode 100644 index b8dcf8f..0000000 --- a/vendor/google.golang.org/appengine/identity.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import ( - "time" - - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/app_identity" - modpb "google.golang.org/appengine/internal/modules" -) - -// AppID returns the application ID for the current application. -// The string will be a plain application ID (e.g. "appid"), with a -// domain prefix for custom domain deployments (e.g. "example.com:appid"). -func AppID(c context.Context) string { return internal.AppID(c) } - -// DefaultVersionHostname returns the standard hostname of the default version -// of the current application (e.g. "my-app.appspot.com"). This is suitable for -// use in constructing URLs. -func DefaultVersionHostname(c context.Context) string { - return internal.DefaultVersionHostname(c) -} - -// ModuleName returns the module name of the current instance. 
-func ModuleName(c context.Context) string { - return internal.ModuleName(c) -} - -// ModuleHostname returns a hostname of a module instance. -// If module is the empty string, it refers to the module of the current instance. -// If version is empty, it refers to the version of the current instance if valid, -// or the default version of the module of the current instance. -// If instance is empty, ModuleHostname returns the load-balancing hostname. -func ModuleHostname(c context.Context, module, version, instance string) (string, error) { - req := &modpb.GetHostnameRequest{} - if module != "" { - req.Module = &module - } - if version != "" { - req.Version = &version - } - if instance != "" { - req.Instance = &instance - } - res := &modpb.GetHostnameResponse{} - if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil { - return "", err - } - return *res.Hostname, nil -} - -// VersionID returns the version ID for the current application. -// It will be of the form "X.Y", where X is specified in app.yaml, -// and Y is a number generated when each version of the app is uploaded. -// It does not include a module name. -func VersionID(c context.Context) string { return internal.VersionID(c) } - -// InstanceID returns a mostly-unique identifier for this instance. -func InstanceID() string { return internal.InstanceID() } - -// Datacenter returns an identifier for the datacenter that the instance is running in. -func Datacenter(c context.Context) string { return internal.Datacenter(c) } - -// ServerSoftware returns the App Engine release version. -// In production, it looks like "Google App Engine/X.Y.Z". -// In the development appserver, it looks like "Development/X.Y". -func ServerSoftware() string { return internal.ServerSoftware() } - -// RequestID returns a string that uniquely identifies the request. 
-func RequestID(c context.Context) string { return internal.RequestID(c) } - -// AccessToken generates an OAuth2 access token for the specified scopes on -// behalf of service account of this application. This token will expire after -// the returned time. -func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) { - req := &pb.GetAccessTokenRequest{Scope: scopes} - res := &pb.GetAccessTokenResponse{} - - err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res) - if err != nil { - return "", time.Time{}, err - } - return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil -} - -// Certificate represents a public certificate for the app. -type Certificate struct { - KeyName string - Data []byte // PEM-encoded X.509 certificate -} - -// PublicCertificates retrieves the public certificates for the app. -// They can be used to verify a signature returned by SignBytes. -func PublicCertificates(c context.Context) ([]Certificate, error) { - req := &pb.GetPublicCertificateForAppRequest{} - res := &pb.GetPublicCertificateForAppResponse{} - if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil { - return nil, err - } - var cs []Certificate - for _, pc := range res.PublicCertificateList { - cs = append(cs, Certificate{ - KeyName: pc.GetKeyName(), - Data: []byte(pc.GetX509CertificatePem()), - }) - } - return cs, nil -} - -// ServiceAccount returns a string representing the service account name, in -// the form of an email address (typically app_id@appspot.gserviceaccount.com). 
-func ServiceAccount(c context.Context) (string, error) { - req := &pb.GetServiceAccountNameRequest{} - res := &pb.GetServiceAccountNameResponse{} - - err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res) - if err != nil { - return "", err - } - return res.GetServiceAccountName(), err -} - -// SignBytes signs bytes using a private key unique to your application. -func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) { - req := &pb.SignForAppRequest{BytesToSign: bytes} - res := &pb.SignForAppResponse{} - - if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil { - return "", nil, err - } - return res.GetKeyName(), res.GetSignatureBytes(), nil -} - -func init() { - internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name) - internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name) -} diff --git a/vendor/google.golang.org/appengine/image/image.go b/vendor/google.golang.org/appengine/image/image.go deleted file mode 100644 index 780d53e..0000000 --- a/vendor/google.golang.org/appengine/image/image.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package image provides image services. -package image - -import ( - "fmt" - "net/url" - - "golang.org/x/net/context" - - "google.golang.org/appengine" - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/image" -) - -type ServingURLOptions struct { - Secure bool // whether the URL should use HTTPS - - // Size must be between zero and 1600. - // If Size is non-zero, a resized version of the image is served, - // and Size is the served image's longest dimension. The aspect ratio is preserved. - // If Crop is true the image is cropped from the center instead of being resized. 
- Size int - Crop bool -} - -// ServingURL returns a URL that will serve an image from Blobstore. -func ServingURL(c context.Context, key appengine.BlobKey, opts *ServingURLOptions) (*url.URL, error) { - req := &pb.ImagesGetUrlBaseRequest{ - BlobKey: (*string)(&key), - } - if opts != nil && opts.Secure { - req.CreateSecureUrl = &opts.Secure - } - res := &pb.ImagesGetUrlBaseResponse{} - if err := internal.Call(c, "images", "GetUrlBase", req, res); err != nil { - return nil, err - } - - // The URL may have suffixes added to dynamically resize or crop: - // - adding "=s32" will serve the image resized to 32 pixels, preserving the aspect ratio. - // - adding "=s32-c" is the same as "=s32" except it will be cropped. - u := *res.Url - if opts != nil && opts.Size > 0 { - u += fmt.Sprintf("=s%d", opts.Size) - if opts.Crop { - u += "-c" - } - } - return url.Parse(u) -} - -// DeleteServingURL deletes the serving URL for an image. -func DeleteServingURL(c context.Context, key appengine.BlobKey) error { - req := &pb.ImagesDeleteUrlBaseRequest{ - BlobKey: (*string)(&key), - } - res := &pb.ImagesDeleteUrlBaseResponse{} - return internal.Call(c, "images", "DeleteUrlBase", req, res) -} - -func init() { - internal.RegisterErrorCodeMap("images", pb.ImagesServiceError_ErrorCode_name) -} diff --git a/vendor/google.golang.org/appengine/internal/aetesting/fake.go b/vendor/google.golang.org/appengine/internal/aetesting/fake.go deleted file mode 100644 index 6f5c197..0000000 --- a/vendor/google.golang.org/appengine/internal/aetesting/fake.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package aetesting provides utilities for testing App Engine packages. -// This is not for testing user applications. 
-package aetesting - -import ( - "fmt" - "reflect" - "testing" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// FakeSingleContext returns a context whose Call invocations will be serviced -// by f, which should be a function that has two arguments of the input and output -// protocol buffer type, and one error return. -func FakeSingleContext(t *testing.T, service, method string, f interface{}) context.Context { - fv := reflect.ValueOf(f) - if fv.Kind() != reflect.Func { - t.Fatal("not a function") - } - ft := fv.Type() - if ft.NumIn() != 2 || ft.NumOut() != 1 { - t.Fatalf("f has %d in and %d out, want 2 in and 1 out", ft.NumIn(), ft.NumOut()) - } - for i := 0; i < 2; i++ { - at := ft.In(i) - if !at.Implements(protoMessageType) { - t.Fatalf("arg %d does not implement proto.Message", i) - } - } - if ft.Out(0) != errorType { - t.Fatalf("f's return is %v, want error", ft.Out(0)) - } - s := &single{ - t: t, - service: service, - method: method, - f: fv, - } - return internal.WithCallOverride(context.Background(), s.call) -} - -var ( - protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() - errorType = reflect.TypeOf((*error)(nil)).Elem() -) - -type single struct { - t *testing.T - service, method string - f reflect.Value -} - -func (s *single) call(ctx context.Context, service, method string, in, out proto.Message) error { - if service == "__go__" { - if method == "GetNamespace" { - return nil // always yield an empty namespace - } - return fmt.Errorf("Unknown API call /%s.%s", service, method) - } - if service != s.service || method != s.method { - s.t.Fatalf("Unexpected call to /%s.%s", service, method) - } - ins := []reflect.Value{ - reflect.ValueOf(in), - reflect.ValueOf(out), - } - outs := s.f.Call(ins) - if outs[0].IsNil() { - return nil - } - return outs[0].Interface().(error) -} diff --git a/vendor/google.golang.org/appengine/internal/api.go 
b/vendor/google.golang.org/appengine/internal/api.go deleted file mode 100644 index aa139d4..0000000 --- a/vendor/google.golang.org/appengine/internal/api.go +++ /dev/null @@ -1,640 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" - - basepb "google.golang.org/appengine/internal/base" - logpb "google.golang.org/appengine/internal/log" - remotepb "google.golang.org/appengine/internal/remote_api" -) - -const ( - apiPath = "/rpc_http" -) - -var ( - // Incoming headers. - ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") - dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") - traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") - curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") - userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") - remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") - - // Outgoing headers. 
- apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") - apiEndpointHeaderValue = []string{"app-engine-apis"} - apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") - apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} - apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") - apiContentType = http.CanonicalHeaderKey("Content-Type") - apiContentTypeValue = []string{"application/octet-stream"} - logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") - - apiHTTPClient = &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: limitDial, - }, - } -) - -func apiURL() *url.URL { - host, port := "appengine.googleapis.internal", "10001" - if h := os.Getenv("API_HOST"); h != "" { - host = h - } - if p := os.Getenv("API_PORT"); p != "" { - port = p - } - return &url.URL{ - Scheme: "http", - Host: host + ":" + port, - Path: apiPath, - } -} - -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - stopFlushing := make(chan int) - - ctxs.Lock() - ctxs.m[r] = c - ctxs.Unlock() - defer func() { - ctxs.Lock() - delete(ctxs.m, r) - ctxs.Unlock() - }() - - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. 
- r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } - - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. - go c.logFlusher(stopFlushing) - - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more - - stopFlushing <- 1 // any logging beyond this point will be dropped - - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - go c.flushLog(false) - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) - - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } -} - -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() - - http.DefaultServeMux.ServeHTTP(c, r) -} - -func renderPanic(x interface{}) string { - buf := make([]byte, 16<<10) // 16 KB should be plenty - buf = buf[:runtime.Stack(buf, false)] - - // Remove the first few stack frames: - // this func - // the recover closure in the caller - // That will root the stack trace at the site of the panic. - const ( - skipStart = "internal.renderPanic" - skipFrames = 2 - ) - start := bytes.Index(buf, []byte(skipStart)) - p := start - for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { - p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 - if p < 0 { - break - } - } - if p >= 0 { - // buf[start:p+1] is the block to remove. - // Copy buf[p+1:] over buf[start:] and shrink buf. - copy(buf[start:], buf[p+1:]) - buf = buf[:len(buf)-(p+1-start)] - } - - // Add panic heading. - head := fmt.Sprintf("panic: %v\n\n", x) - if len(head) > len(buf) { - // Extremely unlikely to happen. 
- return head - } - copy(buf[len(head):], buf) - copy(buf, head) - - return string(buf) -} - -var ctxs = struct { - sync.Mutex - m map[*http.Request]*context - bg *context // background context, lazily initialized - // dec is used by tests to decorate the netcontext.Context returned - // for a given request. This allows tests to add overrides (such as - // WithAppIDOverride) to the context. The map is nil outside tests. - dec map[*http.Request]func(netcontext.Context) netcontext.Context -}{ - m: make(map[*http.Request]*context), -} - -// context represents the context of an in-flight HTTP request. -// It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { - req *http.Request - - outCode int - outHeader http.Header - outBody []byte - - pendingLogs struct { - sync.Mutex - lines []*logpb.UserAppLogLine - flushes int - } - - apiURL *url.URL -} - -var contextKey = "holds a *context" - -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) - return c -} - -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) - if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { - ctx = withNamespace(ctx, ns) - } - return ctx -} - -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) -} - -func IncomingHeaders(ctx netcontext.Context) http.Header { - if c := fromContext(ctx); c != nil { - return c.req.Header - } - return nil -} - -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - ctxs.Lock() - c := ctxs.m[req] - d := ctxs.dec[req] - ctxs.Unlock() - - if d != nil { - parent = d(parent) - } - - if c == nil { - // Someone passed in an http.Request that is not in-flight. - // We panic here rather than panicking at a later point - // so that stack traces will be more sensible. 
- log.Panic("appengine: NewContext passed an unknown http.Request") - } - return withContext(parent, c) -} - -func BackgroundContext() netcontext.Context { - ctxs.Lock() - defer ctxs.Unlock() - - if ctxs.bg != nil { - return toContext(ctxs.bg) - } - - // Compute background security ticket. - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - ticket := fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - - ctxs.bg = &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go ctxs.bg.logFlusher(make(chan int)) - - return toContext(ctxs.bg) -} - -// RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. -// It should only be used by aetest package. 
-func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() { - c := &context{ - req: req, - apiURL: apiURL, - } - ctxs.Lock() - defer ctxs.Unlock() - if _, ok := ctxs.m[req]; ok { - log.Panic("req already associated with context") - } - if _, ok := ctxs.dec[req]; ok { - log.Panic("req already associated with context") - } - if ctxs.dec == nil { - ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context) - } - ctxs.m[req] = c - ctxs.dec[req] = decorate - - return func() { - ctxs.Lock() - delete(ctxs.m, req) - delete(ctxs.dec, req) - ctxs.Unlock() - } -} - -var errTimeout = &CallError{ - Detail: "Deadline exceeded", - Code: int32(remotepb.RpcError_CANCELLED), - Timeout: true, -} - -func (c *context) Header() http.Header { return c.outHeader } - -// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status -// codes do not permit a response body (nor response entity headers such as -// Content-Length, Content-Type, etc). -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -func (c *context) Write(b []byte) (int, error) { - if c.outCode == 0 { - c.WriteHeader(http.StatusOK) - } - if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { - return 0, http.ErrBodyNotAllowed - } - c.outBody = append(c.outBody, b...) 
- return len(b), nil -} - -func (c *context) WriteHeader(code int) { - if c.outCode != 0 { - logf(c, 3, "WriteHeader called multiple times on request.") // error level - return - } - c.outCode = code -} - -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { - hreq := &http.Request{ - Method: "POST", - URL: c.apiURL, - Header: http.Header{ - apiEndpointHeader: apiEndpointHeaderValue, - apiMethodHeader: apiMethodHeaderValue, - apiContentType: apiContentTypeValue, - apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, - }, - Body: ioutil.NopCloser(bytes.NewReader(body)), - ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) - } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) - } - - tr := apiHTTPClient.Transport.(*http.Transport) - - var timedOut int32 // atomic; set to 1 if timed out - t := time.AfterFunc(timeout, func() { - atomic.StoreInt32(&timedOut, 1) - tr.CancelRequest(hreq) - }) - defer t.Stop() - defer func() { - // Check if timeout was exceeded. 
- if atomic.LoadInt32(&timedOut) != 0 { - err = errTimeout - } - }() - - hresp, err := apiHTTPClient.Do(hreq) - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - defer hresp.Body.Close() - hrespBody, err := ioutil.ReadAll(hresp.Body) - if hresp.StatusCode != 200 { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge response bad: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return hrespBody, nil -} - -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errors.New("not an App Engine context") - } - - // Apply transaction modifications if we're in a transaction. - if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction context has expired") - } - applyTransaction(in, &t.transaction) - } - - // Default RPC timeout is 60s. 
- timeout := 60 * time.Second - if deadline, ok := ctx.Deadline(); ok { - timeout = deadline.Sub(time.Now()) - } - - data, err := proto.Marshal(in) - if err != nil { - return err - } - - ticket := c.req.Header.Get(ticketHeader) - req := &remotepb.Request{ - ServiceName: &service, - Method: &method, - Request: data, - RequestId: &ticket, - } - hreqBody, err := proto.Marshal(req) - if err != nil { - return err - } - - hrespBody, err := c.post(hreqBody, timeout) - if err != nil { - return err - } - - res := &remotepb.Response{} - if err := proto.Unmarshal(hrespBody, res); err != nil { - return err - } - if res.RpcError != nil { - ce := &CallError{ - Detail: res.RpcError.GetDetail(), - Code: *res.RpcError.Code, - } - switch remotepb.RpcError_ErrorCode(ce.Code) { - case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: - ce.Timeout = true - } - return ce - } - if res.ApplicationError != nil { - return &APIError{ - Service: *req.ServiceName, - Detail: res.ApplicationError.GetDetail(), - Code: *res.ApplicationError.Code, - } - } - if res.Exception != nil || res.JavaException != nil { - // This shouldn't happen, but let's be defensive. - return &CallError{ - Detail: "service bridge returned exception", - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return proto.Unmarshal(res.Response, out) -} - -func (c *context) Request() *http.Request { - return c.req -} - -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { - // Truncate long log lines. - // TODO(dsymonds): Check if this is still necessary. 
- const lim = 8 << 10 - if len(*ll.Message) > lim { - suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) - ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) - } - - c.pendingLogs.Lock() - c.pendingLogs.lines = append(c.pendingLogs.lines, ll) - c.pendingLogs.Unlock() -} - -var logLevelName = map[int64]string{ - 0: "DEBUG", - 1: "INFO", - 2: "WARNING", - 3: "ERROR", - 4: "CRITICAL", -} - -func logf(c *context, level int64, format string, args ...interface{}) { - s := fmt.Sprintf(format, args...) - s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - log.Print(logLevelName[level] + ": " + s) -} - -// flushLog attempts to flush any pending logs to the appserver. -// It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { - c.pendingLogs.Lock() - // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. - n, rem := 0, 30<<20 - for ; n < len(c.pendingLogs.lines); n++ { - ll := c.pendingLogs.lines[n] - // Each log line will require about 3 bytes of overhead. - nb := proto.Size(ll) + 3 - if nb > rem { - break - } - rem -= nb - } - lines := c.pendingLogs.lines[:n] - c.pendingLogs.lines = c.pendingLogs.lines[n:] - c.pendingLogs.Unlock() - - if len(lines) == 0 && !force { - // Nothing to flush. - return false - } - - rescueLogs := false - defer func() { - if rescueLogs { - c.pendingLogs.Lock() - c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) 
- c.pendingLogs.Unlock() - } - }() - - buf, err := proto.Marshal(&logpb.UserAppLogGroup{ - LogLine: lines, - }) - if err != nil { - log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) - rescueLogs = true - return false - } - - req := &logpb.FlushRequest{ - Logs: buf, - } - res := &basepb.VoidProto{} - c.pendingLogs.Lock() - c.pendingLogs.flushes++ - c.pendingLogs.Unlock() - if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { - log.Printf("internal.flushLog: Flush RPC: %v", err) - rescueLogs = true - return false - } - return true -} - -const ( - // Log flushing parameters. - flushInterval = 1 * time.Second - forceFlushInterval = 60 * time.Second -) - -func (c *context) logFlusher(stop <-chan int) { - lastFlush := time.Now() - tick := time.NewTicker(flushInterval) - for { - select { - case <-stop: - // Request finished. - tick.Stop() - return - case <-tick.C: - force := time.Now().Sub(lastFlush) > forceFlushInterval - if c.flushLog(force) { - lastFlush = time.Now() - } - } - } -} - -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) -} diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go deleted file mode 100644 index 1c072e9..0000000 --- a/vendor/google.golang.org/appengine/internal/api_classic.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -// +build appengine - -package internal - -import ( - "errors" - "net/http" - "time" - - "appengine" - "appengine_internal" - basepb "appengine_internal/base" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" -) - -var contextKey = "holds an appengine.Context" - -func fromContext(ctx netcontext.Context) appengine.Context { - c, _ := ctx.Value(&contextKey).(appengine.Context) - return c -} - -// This is only for classic App Engine adapters. -func ClassicContextFromContext(ctx netcontext.Context) appengine.Context { - return fromContext(ctx) -} - -func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) - - s := &basepb.StringProto{} - c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) - if ns := s.GetValue(); ns != "" { - ctx = NamespacedContext(ctx, ns) - } - - return ctx -} - -func IncomingHeaders(ctx netcontext.Context) http.Header { - if c := fromContext(ctx); c != nil { - if req, ok := c.Request().(*http.Request); ok { - return req.Header - } - } - return nil -} - -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - c := appengine.NewContext(req) - return withContext(parent, c) -} - -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errors.New("not an App Engine context") - } - - // Apply transaction modifications if we're in a transaction. 
- if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction context has expired") - } - applyTransaction(in, &t.transaction) - } - - var opts *appengine_internal.CallOptions - if d, ok := ctx.Deadline(); ok { - opts = &appengine_internal.CallOptions{ - Timeout: d.Sub(time.Now()), - } - } - - err := c.Call(service, method, in, out, opts) - switch v := err.(type) { - case *appengine_internal.APIError: - return &APIError{ - Service: v.Service, - Detail: v.Detail, - Code: v.Code, - } - case *appengine_internal.CallError: - return &CallError{ - Detail: v.Detail, - Code: v.Code, - Timeout: v.Timeout, - } - } - return err -} - -func handleHTTP(w http.ResponseWriter, r *http.Request) { - panic("handleHTTP called; this should be impossible") -} - -func logf(c appengine.Context, level int64, format string, args ...interface{}) { - var fn func(format string, args ...interface{}) - switch level { - case 0: - fn = c.Debugf - case 1: - fn = c.Infof - case 2: - fn = c.Warningf - case 3: - fn = c.Errorf - case 4: - fn = c.Criticalf - default: - // This shouldn't happen. - fn = c.Criticalf - } - fn(format, args...) -} diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go deleted file mode 100644 index ec5383e..0000000 --- a/vendor/google.golang.org/appengine/internal/api_common.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -package internal - -import ( - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" -) - -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error - -var callOverrideKey = "holds []CallOverrideFunc" - -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { - // We avoid appending to any existing call override - // so we don't risk overwriting a popped stack below. - var cofs []CallOverrideFunc - if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok { - cofs = append(cofs, uf...) - } - cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) -} - -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { - cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) - if len(cofs) == 0 { - return nil, nil, false - } - // We found a list of overrides; grab the last, and reconstitute a - // context that will hide it. - f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) - return f, ctx, true -} - -type logOverrideFunc func(level int64, format string, args ...interface{}) - -var logOverrideKey = "holds a logOverrideFunc" - -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) -} - -var appIDOverrideKey = "holds a string, being the full app ID" - -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) -} - -var namespaceKey = "holds the namespace string" - -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) -} - -func NamespaceFromContext(ctx netcontext.Context) string { - // If there's no namespace, return the empty string. 
- ns, _ := ctx.Value(&namespaceKey).(string) - return ns -} - -// FullyQualifiedAppID returns the fully-qualified application ID. -// This may contain a partition prefix (e.g. "s~" for High Replication apps), -// or a domain prefix (e.g. "example.com:"). -func FullyQualifiedAppID(ctx netcontext.Context) string { - if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { - return id - } - return fullyQualifiedAppID(ctx) -} - -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { - if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { - f(level, format, args...) - return - } - logf(fromContext(ctx), level, format, args...) -} - -// NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { - n := &namespacedContext{ - namespace: namespace, - } - return withNamespace(WithCallOverride(ctx, n.call), namespace) -} - -type namespacedContext struct { - namespace string -} - -func (n *namespacedContext) call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - // Apply any namespace mods. - if mod, ok := NamespaceMods[service]; ok { - mod(in, n.namespace) - } - return Call(ctx, service, method, in, out) -} diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go deleted file mode 100644 index 11df8c0..0000000 --- a/vendor/google.golang.org/appengine/internal/app_id.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -package internal - -import ( - "strings" -) - -func parseFullAppID(appid string) (partition, domain, displayID string) { - if i := strings.Index(appid, "~"); i != -1 { - partition, appid = appid[:i], appid[i+1:] - } - if i := strings.Index(appid, ":"); i != -1 { - domain, appid = appid[:i], appid[i+1:] - } - return partition, domain, appid -} - -// appID returns "appid" or "domain.com:appid". -func appID(fullAppID string) string { - _, dom, dis := parseFullAppID(fullAppID) - if dom != "" { - return dom + ":" + dis - } - return dis -} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go deleted file mode 100644 index 87d9701..0000000 --- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go +++ /dev/null @@ -1,296 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto -// DO NOT EDIT! - -/* -Package app_identity is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/app_identity/app_identity_service.proto - -It has these top-level messages: - AppIdentityServiceError - SignForAppRequest - SignForAppResponse - GetPublicCertificateForAppRequest - PublicCertificate - GetPublicCertificateForAppResponse - GetServiceAccountNameRequest - GetServiceAccountNameResponse - GetAccessTokenRequest - GetAccessTokenResponse - GetDefaultGcsBucketNameRequest - GetDefaultGcsBucketNameResponse -*/ -package app_identity - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type AppIdentityServiceError_ErrorCode int32 - -const ( - AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0 - AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9 - AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000 - AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001 - AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002 - AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003 - AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005 - AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006 -) - -var AppIdentityServiceError_ErrorCode_name = map[int32]string{ - 0: "SUCCESS", - 9: "UNKNOWN_SCOPE", - 1000: "BLOB_TOO_LARGE", - 1001: "DEADLINE_EXCEEDED", - 1002: "NOT_A_VALID_APP", - 1003: "UNKNOWN_ERROR", - 1005: "NOT_ALLOWED", - 1006: "NOT_IMPLEMENTED", -} -var AppIdentityServiceError_ErrorCode_value = map[string]int32{ - "SUCCESS": 0, - "UNKNOWN_SCOPE": 9, - "BLOB_TOO_LARGE": 1000, - "DEADLINE_EXCEEDED": 1001, - "NOT_A_VALID_APP": 1002, - "UNKNOWN_ERROR": 1003, - "NOT_ALLOWED": 1005, - "NOT_IMPLEMENTED": 1006, -} - -func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode { - p := new(AppIdentityServiceError_ErrorCode) - *p = x - return p -} -func (x AppIdentityServiceError_ErrorCode) String() string { - return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x)) -} -func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode") - if err != nil { - return err - } - *x = AppIdentityServiceError_ErrorCode(value) - return nil -} - -type AppIdentityServiceError struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m 
*AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} } -func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) } -func (*AppIdentityServiceError) ProtoMessage() {} - -type SignForAppRequest struct { - BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} } -func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) } -func (*SignForAppRequest) ProtoMessage() {} - -func (m *SignForAppRequest) GetBytesToSign() []byte { - if m != nil { - return m.BytesToSign - } - return nil -} - -type SignForAppResponse struct { - KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` - SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} } -func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) } -func (*SignForAppResponse) ProtoMessage() {} - -func (m *SignForAppResponse) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -func (m *SignForAppResponse) GetSignatureBytes() []byte { - if m != nil { - return m.SignatureBytes - } - return nil -} - -type GetPublicCertificateForAppRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} } -func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) } -func (*GetPublicCertificateForAppRequest) ProtoMessage() {} - -type PublicCertificate struct { - KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` - X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"` - 
XXX_unrecognized []byte `json:"-"` -} - -func (m *PublicCertificate) Reset() { *m = PublicCertificate{} } -func (m *PublicCertificate) String() string { return proto.CompactTextString(m) } -func (*PublicCertificate) ProtoMessage() {} - -func (m *PublicCertificate) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -func (m *PublicCertificate) GetX509CertificatePem() string { - if m != nil && m.X509CertificatePem != nil { - return *m.X509CertificatePem - } - return "" -} - -type GetPublicCertificateForAppResponse struct { - PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"` - MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} } -func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) } -func (*GetPublicCertificateForAppResponse) ProtoMessage() {} - -func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate { - if m != nil { - return m.PublicCertificateList - } - return nil -} - -func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 { - if m != nil && m.MaxClientCacheTimeInSecond != nil { - return *m.MaxClientCacheTimeInSecond - } - return 0 -} - -type GetServiceAccountNameRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} } -func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetServiceAccountNameRequest) ProtoMessage() {} - -type GetServiceAccountNameResponse struct { - ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" 
json:"service_account_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} } -func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) } -func (*GetServiceAccountNameResponse) ProtoMessage() {} - -func (m *GetServiceAccountNameResponse) GetServiceAccountName() string { - if m != nil && m.ServiceAccountName != nil { - return *m.ServiceAccountName - } - return "" -} - -type GetAccessTokenRequest struct { - Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"` - ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"` - ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} } -func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) } -func (*GetAccessTokenRequest) ProtoMessage() {} - -func (m *GetAccessTokenRequest) GetScope() []string { - if m != nil { - return m.Scope - } - return nil -} - -func (m *GetAccessTokenRequest) GetServiceAccountId() int64 { - if m != nil && m.ServiceAccountId != nil { - return *m.ServiceAccountId - } - return 0 -} - -func (m *GetAccessTokenRequest) GetServiceAccountName() string { - if m != nil && m.ServiceAccountName != nil { - return *m.ServiceAccountName - } - return "" -} - -type GetAccessTokenResponse struct { - AccessToken *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"` - ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} } -func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) } -func 
(*GetAccessTokenResponse) ProtoMessage() {} - -func (m *GetAccessTokenResponse) GetAccessToken() string { - if m != nil && m.AccessToken != nil { - return *m.AccessToken - } - return "" -} - -func (m *GetAccessTokenResponse) GetExpirationTime() int64 { - if m != nil && m.ExpirationTime != nil { - return *m.ExpirationTime - } - return 0 -} - -type GetDefaultGcsBucketNameRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} } -func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {} - -type GetDefaultGcsBucketNameResponse struct { - DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} } -func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) } -func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {} - -func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string { - if m != nil && m.DefaultGcsBucketName != nil { - return *m.DefaultGcsBucketName - } - return "" -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto deleted file mode 100644 index 19610ca..0000000 --- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto2"; -option go_package = "app_identity"; - -package appengine; - -message AppIdentityServiceError { - enum ErrorCode { - SUCCESS = 0; - UNKNOWN_SCOPE = 9; - BLOB_TOO_LARGE = 1000; - DEADLINE_EXCEEDED = 1001; - NOT_A_VALID_APP = 1002; - UNKNOWN_ERROR = 1003; - NOT_ALLOWED = 1005; - 
NOT_IMPLEMENTED = 1006; - } -} - -message SignForAppRequest { - optional bytes bytes_to_sign = 1; -} - -message SignForAppResponse { - optional string key_name = 1; - optional bytes signature_bytes = 2; -} - -message GetPublicCertificateForAppRequest { -} - -message PublicCertificate { - optional string key_name = 1; - optional string x509_certificate_pem = 2; -} - -message GetPublicCertificateForAppResponse { - repeated PublicCertificate public_certificate_list = 1; - optional int64 max_client_cache_time_in_second = 2; -} - -message GetServiceAccountNameRequest { -} - -message GetServiceAccountNameResponse { - optional string service_account_name = 1; -} - -message GetAccessTokenRequest { - repeated string scope = 1; - optional int64 service_account_id = 2; - optional string service_account_name = 3; -} - -message GetAccessTokenResponse { - optional string access_token = 1; - optional int64 expiration_time = 2; -} - -message GetDefaultGcsBucketNameRequest { -} - -message GetDefaultGcsBucketNameResponse { - optional string default_gcs_bucket_name = 1; -} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go deleted file mode 100644 index 36a1956..0000000 --- a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go +++ /dev/null @@ -1,133 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/base/api_base.proto -// DO NOT EDIT! - -/* -Package base is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/base/api_base.proto - -It has these top-level messages: - StringProto - Integer32Proto - Integer64Proto - BoolProto - DoubleProto - BytesProto - VoidProto -*/ -package base - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type StringProto struct { - Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StringProto) Reset() { *m = StringProto{} } -func (m *StringProto) String() string { return proto.CompactTextString(m) } -func (*StringProto) ProtoMessage() {} - -func (m *StringProto) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type Integer32Proto struct { - Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Integer32Proto) Reset() { *m = Integer32Proto{} } -func (m *Integer32Proto) String() string { return proto.CompactTextString(m) } -func (*Integer32Proto) ProtoMessage() {} - -func (m *Integer32Proto) GetValue() int32 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Integer64Proto struct { - Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Integer64Proto) Reset() { *m = Integer64Proto{} } -func (m *Integer64Proto) String() string { return proto.CompactTextString(m) } -func (*Integer64Proto) ProtoMessage() {} - -func (m *Integer64Proto) GetValue() int64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type BoolProto struct { - Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BoolProto) Reset() { *m = BoolProto{} } -func (m *BoolProto) String() string { return proto.CompactTextString(m) } -func (*BoolProto) ProtoMessage() {} - -func (m *BoolProto) GetValue() bool { - if m != nil && m.Value != nil { - return *m.Value - } - return false -} - -type DoubleProto struct { - Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DoubleProto) Reset() { *m = 
DoubleProto{} } -func (m *DoubleProto) String() string { return proto.CompactTextString(m) } -func (*DoubleProto) ProtoMessage() {} - -func (m *DoubleProto) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type BytesProto struct { - Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BytesProto) Reset() { *m = BytesProto{} } -func (m *BytesProto) String() string { return proto.CompactTextString(m) } -func (*BytesProto) ProtoMessage() {} - -func (m *BytesProto) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type VoidProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *VoidProto) Reset() { *m = VoidProto{} } -func (m *VoidProto) String() string { return proto.CompactTextString(m) } -func (*VoidProto) ProtoMessage() {} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto deleted file mode 100644 index 56cd7a3..0000000 --- a/vendor/google.golang.org/appengine/internal/base/api_base.proto +++ /dev/null @@ -1,33 +0,0 @@ -// Built-in base types for API calls. Primarily useful as return types. 
- -syntax = "proto2"; -option go_package = "base"; - -package appengine.base; - -message StringProto { - required string value = 1; -} - -message Integer32Proto { - required int32 value = 1; -} - -message Integer64Proto { - required int64 value = 1; -} - -message BoolProto { - required bool value = 1; -} - -message DoubleProto { - required double value = 1; -} - -message BytesProto { - required bytes value = 1 [ctype=CORD]; -} - -message VoidProto { -} diff --git a/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go deleted file mode 100644 index 8705ec3..0000000 --- a/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go +++ /dev/null @@ -1,347 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/blobstore/blobstore_service.proto -// DO NOT EDIT! - -/* -Package blobstore is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/blobstore/blobstore_service.proto - -It has these top-level messages: - BlobstoreServiceError - CreateUploadURLRequest - CreateUploadURLResponse - DeleteBlobRequest - FetchDataRequest - FetchDataResponse - CloneBlobRequest - CloneBlobResponse - DecodeBlobKeyRequest - DecodeBlobKeyResponse - CreateEncodedGoogleStorageKeyRequest - CreateEncodedGoogleStorageKeyResponse -*/ -package blobstore - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type BlobstoreServiceError_ErrorCode int32 - -const ( - BlobstoreServiceError_OK BlobstoreServiceError_ErrorCode = 0 - BlobstoreServiceError_INTERNAL_ERROR BlobstoreServiceError_ErrorCode = 1 - BlobstoreServiceError_URL_TOO_LONG BlobstoreServiceError_ErrorCode = 2 - BlobstoreServiceError_PERMISSION_DENIED BlobstoreServiceError_ErrorCode = 3 - BlobstoreServiceError_BLOB_NOT_FOUND BlobstoreServiceError_ErrorCode = 4 - BlobstoreServiceError_DATA_INDEX_OUT_OF_RANGE BlobstoreServiceError_ErrorCode = 5 - BlobstoreServiceError_BLOB_FETCH_SIZE_TOO_LARGE BlobstoreServiceError_ErrorCode = 6 - BlobstoreServiceError_ARGUMENT_OUT_OF_RANGE BlobstoreServiceError_ErrorCode = 8 - BlobstoreServiceError_INVALID_BLOB_KEY BlobstoreServiceError_ErrorCode = 9 -) - -var BlobstoreServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INTERNAL_ERROR", - 2: "URL_TOO_LONG", - 3: "PERMISSION_DENIED", - 4: "BLOB_NOT_FOUND", - 5: "DATA_INDEX_OUT_OF_RANGE", - 6: "BLOB_FETCH_SIZE_TOO_LARGE", - 8: "ARGUMENT_OUT_OF_RANGE", - 9: "INVALID_BLOB_KEY", -} -var BlobstoreServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INTERNAL_ERROR": 1, - "URL_TOO_LONG": 2, - "PERMISSION_DENIED": 3, - "BLOB_NOT_FOUND": 4, - "DATA_INDEX_OUT_OF_RANGE": 5, - "BLOB_FETCH_SIZE_TOO_LARGE": 6, - "ARGUMENT_OUT_OF_RANGE": 8, - "INVALID_BLOB_KEY": 9, -} - -func (x BlobstoreServiceError_ErrorCode) Enum() *BlobstoreServiceError_ErrorCode { - p := new(BlobstoreServiceError_ErrorCode) - *p = x - return p -} -func (x BlobstoreServiceError_ErrorCode) String() string { - return proto.EnumName(BlobstoreServiceError_ErrorCode_name, int32(x)) -} -func (x *BlobstoreServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(BlobstoreServiceError_ErrorCode_value, data, "BlobstoreServiceError_ErrorCode") - if err != nil { - return err - } - *x = BlobstoreServiceError_ErrorCode(value) - return nil -} - -type 
BlobstoreServiceError struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *BlobstoreServiceError) Reset() { *m = BlobstoreServiceError{} } -func (m *BlobstoreServiceError) String() string { return proto.CompactTextString(m) } -func (*BlobstoreServiceError) ProtoMessage() {} - -type CreateUploadURLRequest struct { - SuccessPath *string `protobuf:"bytes,1,req,name=success_path" json:"success_path,omitempty"` - MaxUploadSizeBytes *int64 `protobuf:"varint,2,opt,name=max_upload_size_bytes" json:"max_upload_size_bytes,omitempty"` - MaxUploadSizePerBlobBytes *int64 `protobuf:"varint,3,opt,name=max_upload_size_per_blob_bytes" json:"max_upload_size_per_blob_bytes,omitempty"` - GsBucketName *string `protobuf:"bytes,4,opt,name=gs_bucket_name" json:"gs_bucket_name,omitempty"` - UrlExpiryTimeSeconds *int32 `protobuf:"varint,5,opt,name=url_expiry_time_seconds" json:"url_expiry_time_seconds,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CreateUploadURLRequest) Reset() { *m = CreateUploadURLRequest{} } -func (m *CreateUploadURLRequest) String() string { return proto.CompactTextString(m) } -func (*CreateUploadURLRequest) ProtoMessage() {} - -func (m *CreateUploadURLRequest) GetSuccessPath() string { - if m != nil && m.SuccessPath != nil { - return *m.SuccessPath - } - return "" -} - -func (m *CreateUploadURLRequest) GetMaxUploadSizeBytes() int64 { - if m != nil && m.MaxUploadSizeBytes != nil { - return *m.MaxUploadSizeBytes - } - return 0 -} - -func (m *CreateUploadURLRequest) GetMaxUploadSizePerBlobBytes() int64 { - if m != nil && m.MaxUploadSizePerBlobBytes != nil { - return *m.MaxUploadSizePerBlobBytes - } - return 0 -} - -func (m *CreateUploadURLRequest) GetGsBucketName() string { - if m != nil && m.GsBucketName != nil { - return *m.GsBucketName - } - return "" -} - -func (m *CreateUploadURLRequest) GetUrlExpiryTimeSeconds() int32 { - if m != nil && m.UrlExpiryTimeSeconds != nil { - return *m.UrlExpiryTimeSeconds - } - return 0 -} - -type 
CreateUploadURLResponse struct { - Url *string `protobuf:"bytes,1,req,name=url" json:"url,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CreateUploadURLResponse) Reset() { *m = CreateUploadURLResponse{} } -func (m *CreateUploadURLResponse) String() string { return proto.CompactTextString(m) } -func (*CreateUploadURLResponse) ProtoMessage() {} - -func (m *CreateUploadURLResponse) GetUrl() string { - if m != nil && m.Url != nil { - return *m.Url - } - return "" -} - -type DeleteBlobRequest struct { - BlobKey []string `protobuf:"bytes,1,rep,name=blob_key" json:"blob_key,omitempty"` - Token *string `protobuf:"bytes,2,opt,name=token" json:"token,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DeleteBlobRequest) Reset() { *m = DeleteBlobRequest{} } -func (m *DeleteBlobRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteBlobRequest) ProtoMessage() {} - -func (m *DeleteBlobRequest) GetBlobKey() []string { - if m != nil { - return m.BlobKey - } - return nil -} - -func (m *DeleteBlobRequest) GetToken() string { - if m != nil && m.Token != nil { - return *m.Token - } - return "" -} - -type FetchDataRequest struct { - BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` - StartIndex *int64 `protobuf:"varint,2,req,name=start_index" json:"start_index,omitempty"` - EndIndex *int64 `protobuf:"varint,3,req,name=end_index" json:"end_index,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FetchDataRequest) Reset() { *m = FetchDataRequest{} } -func (m *FetchDataRequest) String() string { return proto.CompactTextString(m) } -func (*FetchDataRequest) ProtoMessage() {} - -func (m *FetchDataRequest) GetBlobKey() string { - if m != nil && m.BlobKey != nil { - return *m.BlobKey - } - return "" -} - -func (m *FetchDataRequest) GetStartIndex() int64 { - if m != nil && m.StartIndex != nil { - return *m.StartIndex - } - return 0 -} - -func (m *FetchDataRequest) GetEndIndex() int64 { - if m != nil 
&& m.EndIndex != nil { - return *m.EndIndex - } - return 0 -} - -type FetchDataResponse struct { - Data []byte `protobuf:"bytes,1000,req,name=data" json:"data,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FetchDataResponse) Reset() { *m = FetchDataResponse{} } -func (m *FetchDataResponse) String() string { return proto.CompactTextString(m) } -func (*FetchDataResponse) ProtoMessage() {} - -func (m *FetchDataResponse) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -type CloneBlobRequest struct { - BlobKey []byte `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` - MimeType []byte `protobuf:"bytes,2,req,name=mime_type" json:"mime_type,omitempty"` - TargetAppId []byte `protobuf:"bytes,3,req,name=target_app_id" json:"target_app_id,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CloneBlobRequest) Reset() { *m = CloneBlobRequest{} } -func (m *CloneBlobRequest) String() string { return proto.CompactTextString(m) } -func (*CloneBlobRequest) ProtoMessage() {} - -func (m *CloneBlobRequest) GetBlobKey() []byte { - if m != nil { - return m.BlobKey - } - return nil -} - -func (m *CloneBlobRequest) GetMimeType() []byte { - if m != nil { - return m.MimeType - } - return nil -} - -func (m *CloneBlobRequest) GetTargetAppId() []byte { - if m != nil { - return m.TargetAppId - } - return nil -} - -type CloneBlobResponse struct { - BlobKey []byte `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CloneBlobResponse) Reset() { *m = CloneBlobResponse{} } -func (m *CloneBlobResponse) String() string { return proto.CompactTextString(m) } -func (*CloneBlobResponse) ProtoMessage() {} - -func (m *CloneBlobResponse) GetBlobKey() []byte { - if m != nil { - return m.BlobKey - } - return nil -} - -type DecodeBlobKeyRequest struct { - BlobKey []string `protobuf:"bytes,1,rep,name=blob_key" json:"blob_key,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func 
(m *DecodeBlobKeyRequest) Reset() { *m = DecodeBlobKeyRequest{} } -func (m *DecodeBlobKeyRequest) String() string { return proto.CompactTextString(m) } -func (*DecodeBlobKeyRequest) ProtoMessage() {} - -func (m *DecodeBlobKeyRequest) GetBlobKey() []string { - if m != nil { - return m.BlobKey - } - return nil -} - -type DecodeBlobKeyResponse struct { - Decoded []string `protobuf:"bytes,1,rep,name=decoded" json:"decoded,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DecodeBlobKeyResponse) Reset() { *m = DecodeBlobKeyResponse{} } -func (m *DecodeBlobKeyResponse) String() string { return proto.CompactTextString(m) } -func (*DecodeBlobKeyResponse) ProtoMessage() {} - -func (m *DecodeBlobKeyResponse) GetDecoded() []string { - if m != nil { - return m.Decoded - } - return nil -} - -type CreateEncodedGoogleStorageKeyRequest struct { - Filename *string `protobuf:"bytes,1,req,name=filename" json:"filename,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CreateEncodedGoogleStorageKeyRequest) Reset() { *m = CreateEncodedGoogleStorageKeyRequest{} } -func (m *CreateEncodedGoogleStorageKeyRequest) String() string { return proto.CompactTextString(m) } -func (*CreateEncodedGoogleStorageKeyRequest) ProtoMessage() {} - -func (m *CreateEncodedGoogleStorageKeyRequest) GetFilename() string { - if m != nil && m.Filename != nil { - return *m.Filename - } - return "" -} - -type CreateEncodedGoogleStorageKeyResponse struct { - BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CreateEncodedGoogleStorageKeyResponse) Reset() { *m = CreateEncodedGoogleStorageKeyResponse{} } -func (m *CreateEncodedGoogleStorageKeyResponse) String() string { return proto.CompactTextString(m) } -func (*CreateEncodedGoogleStorageKeyResponse) ProtoMessage() {} - -func (m *CreateEncodedGoogleStorageKeyResponse) GetBlobKey() string { - if m != nil && m.BlobKey != nil { - return *m.BlobKey - } - return "" 
-} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto deleted file mode 100644 index 33b2650..0000000 --- a/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto +++ /dev/null @@ -1,71 +0,0 @@ -syntax = "proto2"; -option go_package = "blobstore"; - -package appengine; - -message BlobstoreServiceError { - enum ErrorCode { - OK = 0; - INTERNAL_ERROR = 1; - URL_TOO_LONG = 2; - PERMISSION_DENIED = 3; - BLOB_NOT_FOUND = 4; - DATA_INDEX_OUT_OF_RANGE = 5; - BLOB_FETCH_SIZE_TOO_LARGE = 6; - ARGUMENT_OUT_OF_RANGE = 8; - INVALID_BLOB_KEY = 9; - } -} - -message CreateUploadURLRequest { - required string success_path = 1; - optional int64 max_upload_size_bytes = 2; - optional int64 max_upload_size_per_blob_bytes = 3; - optional string gs_bucket_name = 4; - optional int32 url_expiry_time_seconds = 5; -} - -message CreateUploadURLResponse { - required string url = 1; -} - -message DeleteBlobRequest { - repeated string blob_key = 1; - optional string token = 2; -} - -message FetchDataRequest { - required string blob_key = 1; - required int64 start_index = 2; - required int64 end_index = 3; -} - -message FetchDataResponse { - required bytes data = 1000 [ctype = CORD]; -} - -message CloneBlobRequest { - required bytes blob_key = 1; - required bytes mime_type = 2; - required bytes target_app_id = 3; -} - -message CloneBlobResponse { - required bytes blob_key = 1; -} - -message DecodeBlobKeyRequest { - repeated string blob_key = 1; -} - -message DecodeBlobKeyResponse { - repeated string decoded = 1; -} - -message CreateEncodedGoogleStorageKeyRequest { - required string filename = 1; -} - -message CreateEncodedGoogleStorageKeyResponse { - required string blob_key = 1; -} diff --git a/vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go 
b/vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go deleted file mode 100644 index e57a04b..0000000 --- a/vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go +++ /dev/null @@ -1,125 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/capability/capability_service.proto -// DO NOT EDIT! - -/* -Package channel is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/capability/capability_service.proto - -It has these top-level messages: - IsEnabledRequest - IsEnabledResponse -*/ -package channel - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type IsEnabledResponse_SummaryStatus int32 - -const ( - IsEnabledResponse_DEFAULT IsEnabledResponse_SummaryStatus = 0 - IsEnabledResponse_ENABLED IsEnabledResponse_SummaryStatus = 1 - IsEnabledResponse_SCHEDULED_FUTURE IsEnabledResponse_SummaryStatus = 2 - IsEnabledResponse_SCHEDULED_NOW IsEnabledResponse_SummaryStatus = 3 - IsEnabledResponse_DISABLED IsEnabledResponse_SummaryStatus = 4 - IsEnabledResponse_UNKNOWN IsEnabledResponse_SummaryStatus = 5 -) - -var IsEnabledResponse_SummaryStatus_name = map[int32]string{ - 0: "DEFAULT", - 1: "ENABLED", - 2: "SCHEDULED_FUTURE", - 3: "SCHEDULED_NOW", - 4: "DISABLED", - 5: "UNKNOWN", -} -var IsEnabledResponse_SummaryStatus_value = map[string]int32{ - "DEFAULT": 0, - "ENABLED": 1, - "SCHEDULED_FUTURE": 2, - "SCHEDULED_NOW": 3, - "DISABLED": 4, - "UNKNOWN": 5, -} - -func (x IsEnabledResponse_SummaryStatus) Enum() *IsEnabledResponse_SummaryStatus { - p := new(IsEnabledResponse_SummaryStatus) - *p = x - return p -} -func (x IsEnabledResponse_SummaryStatus) String() string { - return proto.EnumName(IsEnabledResponse_SummaryStatus_name, int32(x)) -} -func (x 
*IsEnabledResponse_SummaryStatus) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(IsEnabledResponse_SummaryStatus_value, data, "IsEnabledResponse_SummaryStatus") - if err != nil { - return err - } - *x = IsEnabledResponse_SummaryStatus(value) - return nil -} - -type IsEnabledRequest struct { - Package *string `protobuf:"bytes,1,req,name=package" json:"package,omitempty"` - Capability []string `protobuf:"bytes,2,rep,name=capability" json:"capability,omitempty"` - Call []string `protobuf:"bytes,3,rep,name=call" json:"call,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *IsEnabledRequest) Reset() { *m = IsEnabledRequest{} } -func (m *IsEnabledRequest) String() string { return proto.CompactTextString(m) } -func (*IsEnabledRequest) ProtoMessage() {} - -func (m *IsEnabledRequest) GetPackage() string { - if m != nil && m.Package != nil { - return *m.Package - } - return "" -} - -func (m *IsEnabledRequest) GetCapability() []string { - if m != nil { - return m.Capability - } - return nil -} - -func (m *IsEnabledRequest) GetCall() []string { - if m != nil { - return m.Call - } - return nil -} - -type IsEnabledResponse struct { - SummaryStatus *IsEnabledResponse_SummaryStatus `protobuf:"varint,1,opt,name=summary_status,enum=appengine.IsEnabledResponse_SummaryStatus" json:"summary_status,omitempty"` - TimeUntilScheduled *int64 `protobuf:"varint,2,opt,name=time_until_scheduled" json:"time_until_scheduled,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *IsEnabledResponse) Reset() { *m = IsEnabledResponse{} } -func (m *IsEnabledResponse) String() string { return proto.CompactTextString(m) } -func (*IsEnabledResponse) ProtoMessage() {} - -func (m *IsEnabledResponse) GetSummaryStatus() IsEnabledResponse_SummaryStatus { - if m != nil && m.SummaryStatus != nil { - return *m.SummaryStatus - } - return IsEnabledResponse_DEFAULT -} - -func (m *IsEnabledResponse) GetTimeUntilScheduled() int64 { - if m != nil && m.TimeUntilScheduled 
!= nil { - return *m.TimeUntilScheduled - } - return 0 -} diff --git a/vendor/google.golang.org/appengine/internal/capability/capability_service.proto b/vendor/google.golang.org/appengine/internal/capability/capability_service.proto deleted file mode 100644 index 8f7256d..0000000 --- a/vendor/google.golang.org/appengine/internal/capability/capability_service.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto2"; -option go_package = "channel"; - -package appengine; - -message IsEnabledRequest { - required string package = 1; - repeated string capability = 2; - repeated string call = 3; -} - -message IsEnabledResponse { - enum SummaryStatus { - DEFAULT = 0; - ENABLED = 1; - SCHEDULED_FUTURE = 2; - SCHEDULED_NOW = 3; - DISABLED = 4; - UNKNOWN = 5; - } - optional SummaryStatus summary_status = 1; - - optional int64 time_until_scheduled = 2; -} - -service CapabilityService { - rpc IsEnabled(IsEnabledRequest) returns (IsEnabledResponse) {}; -} diff --git a/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go b/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go deleted file mode 100644 index 7b8d00c..0000000 --- a/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go +++ /dev/null @@ -1,154 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/channel/channel_service.proto -// DO NOT EDIT! - -/* -Package channel is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/channel/channel_service.proto - -It has these top-level messages: - ChannelServiceError - CreateChannelRequest - CreateChannelResponse - SendMessageRequest -*/ -package channel - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type ChannelServiceError_ErrorCode int32 - -const ( - ChannelServiceError_OK ChannelServiceError_ErrorCode = 0 - ChannelServiceError_INTERNAL_ERROR ChannelServiceError_ErrorCode = 1 - ChannelServiceError_INVALID_CHANNEL_KEY ChannelServiceError_ErrorCode = 2 - ChannelServiceError_BAD_MESSAGE ChannelServiceError_ErrorCode = 3 - ChannelServiceError_INVALID_CHANNEL_TOKEN_DURATION ChannelServiceError_ErrorCode = 4 - ChannelServiceError_APPID_ALIAS_REQUIRED ChannelServiceError_ErrorCode = 5 -) - -var ChannelServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INTERNAL_ERROR", - 2: "INVALID_CHANNEL_KEY", - 3: "BAD_MESSAGE", - 4: "INVALID_CHANNEL_TOKEN_DURATION", - 5: "APPID_ALIAS_REQUIRED", -} -var ChannelServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INTERNAL_ERROR": 1, - "INVALID_CHANNEL_KEY": 2, - "BAD_MESSAGE": 3, - "INVALID_CHANNEL_TOKEN_DURATION": 4, - "APPID_ALIAS_REQUIRED": 5, -} - -func (x ChannelServiceError_ErrorCode) Enum() *ChannelServiceError_ErrorCode { - p := new(ChannelServiceError_ErrorCode) - *p = x - return p -} -func (x ChannelServiceError_ErrorCode) String() string { - return proto.EnumName(ChannelServiceError_ErrorCode_name, int32(x)) -} -func (x *ChannelServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ChannelServiceError_ErrorCode_value, data, "ChannelServiceError_ErrorCode") - if err != nil { - return err - } - *x = ChannelServiceError_ErrorCode(value) - return nil -} - -type ChannelServiceError struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *ChannelServiceError) Reset() { *m = ChannelServiceError{} } -func (m *ChannelServiceError) String() string { return proto.CompactTextString(m) } -func (*ChannelServiceError) ProtoMessage() {} - -type CreateChannelRequest struct { - ApplicationKey *string `protobuf:"bytes,1,req,name=application_key" json:"application_key,omitempty"` - DurationMinutes *int32 
`protobuf:"varint,2,opt,name=duration_minutes" json:"duration_minutes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CreateChannelRequest) Reset() { *m = CreateChannelRequest{} } -func (m *CreateChannelRequest) String() string { return proto.CompactTextString(m) } -func (*CreateChannelRequest) ProtoMessage() {} - -func (m *CreateChannelRequest) GetApplicationKey() string { - if m != nil && m.ApplicationKey != nil { - return *m.ApplicationKey - } - return "" -} - -func (m *CreateChannelRequest) GetDurationMinutes() int32 { - if m != nil && m.DurationMinutes != nil { - return *m.DurationMinutes - } - return 0 -} - -type CreateChannelResponse struct { - Token *string `protobuf:"bytes,2,opt,name=token" json:"token,omitempty"` - DurationMinutes *int32 `protobuf:"varint,3,opt,name=duration_minutes" json:"duration_minutes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CreateChannelResponse) Reset() { *m = CreateChannelResponse{} } -func (m *CreateChannelResponse) String() string { return proto.CompactTextString(m) } -func (*CreateChannelResponse) ProtoMessage() {} - -func (m *CreateChannelResponse) GetToken() string { - if m != nil && m.Token != nil { - return *m.Token - } - return "" -} - -func (m *CreateChannelResponse) GetDurationMinutes() int32 { - if m != nil && m.DurationMinutes != nil { - return *m.DurationMinutes - } - return 0 -} - -type SendMessageRequest struct { - ApplicationKey *string `protobuf:"bytes,1,req,name=application_key" json:"application_key,omitempty"` - Message *string `protobuf:"bytes,2,req,name=message" json:"message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SendMessageRequest) Reset() { *m = SendMessageRequest{} } -func (m *SendMessageRequest) String() string { return proto.CompactTextString(m) } -func (*SendMessageRequest) ProtoMessage() {} - -func (m *SendMessageRequest) GetApplicationKey() string { - if m != nil && m.ApplicationKey != nil { - return *m.ApplicationKey - } - return "" -} 
- -func (m *SendMessageRequest) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/channel/channel_service.proto b/vendor/google.golang.org/appengine/internal/channel/channel_service.proto deleted file mode 100644 index 2b5a918..0000000 --- a/vendor/google.golang.org/appengine/internal/channel/channel_service.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto2"; -option go_package = "channel"; - -package appengine; - -message ChannelServiceError { - enum ErrorCode { - OK = 0; - INTERNAL_ERROR = 1; - INVALID_CHANNEL_KEY = 2; - BAD_MESSAGE = 3; - INVALID_CHANNEL_TOKEN_DURATION = 4; - APPID_ALIAS_REQUIRED = 5; - } -} - -message CreateChannelRequest { - required string application_key = 1; - optional int32 duration_minutes = 2; -} - -message CreateChannelResponse { - optional string token = 2; - optional int32 duration_minutes = 3; -} - -message SendMessageRequest { - required string application_key = 1; - required string message = 2; -} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go deleted file mode 100644 index 8613cb7..0000000 --- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go +++ /dev/null @@ -1,2778 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto -// DO NOT EDIT! - -/* -Package datastore is a generated protocol buffer package. 
- -It is generated from these files: - google.golang.org/appengine/internal/datastore/datastore_v3.proto - -It has these top-level messages: - Action - PropertyValue - Property - Path - Reference - User - EntityProto - CompositeProperty - Index - CompositeIndex - IndexPostfix - IndexPosition - Snapshot - InternalHeader - Transaction - Query - CompiledQuery - CompiledCursor - Cursor - Error - Cost - GetRequest - GetResponse - PutRequest - PutResponse - TouchRequest - TouchResponse - DeleteRequest - DeleteResponse - NextRequest - QueryResult - AllocateIdsRequest - AllocateIdsResponse - CompositeIndices - AddActionsRequest - AddActionsResponse - BeginTransactionRequest - CommitResponse -*/ -package datastore - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type Property_Meaning int32 - -const ( - Property_NO_MEANING Property_Meaning = 0 - Property_BLOB Property_Meaning = 14 - Property_TEXT Property_Meaning = 15 - Property_BYTESTRING Property_Meaning = 16 - Property_ATOM_CATEGORY Property_Meaning = 1 - Property_ATOM_LINK Property_Meaning = 2 - Property_ATOM_TITLE Property_Meaning = 3 - Property_ATOM_CONTENT Property_Meaning = 4 - Property_ATOM_SUMMARY Property_Meaning = 5 - Property_ATOM_AUTHOR Property_Meaning = 6 - Property_GD_WHEN Property_Meaning = 7 - Property_GD_EMAIL Property_Meaning = 8 - Property_GEORSS_POINT Property_Meaning = 9 - Property_GD_IM Property_Meaning = 10 - Property_GD_PHONENUMBER Property_Meaning = 11 - Property_GD_POSTALADDRESS Property_Meaning = 12 - Property_GD_RATING Property_Meaning = 13 - Property_BLOBKEY Property_Meaning = 17 - Property_ENTITY_PROTO Property_Meaning = 19 - Property_INDEX_VALUE Property_Meaning = 18 -) - -var Property_Meaning_name = map[int32]string{ - 0: "NO_MEANING", - 14: "BLOB", - 15: "TEXT", - 16: "BYTESTRING", - 1: "ATOM_CATEGORY", - 2: 
"ATOM_LINK", - 3: "ATOM_TITLE", - 4: "ATOM_CONTENT", - 5: "ATOM_SUMMARY", - 6: "ATOM_AUTHOR", - 7: "GD_WHEN", - 8: "GD_EMAIL", - 9: "GEORSS_POINT", - 10: "GD_IM", - 11: "GD_PHONENUMBER", - 12: "GD_POSTALADDRESS", - 13: "GD_RATING", - 17: "BLOBKEY", - 19: "ENTITY_PROTO", - 18: "INDEX_VALUE", -} -var Property_Meaning_value = map[string]int32{ - "NO_MEANING": 0, - "BLOB": 14, - "TEXT": 15, - "BYTESTRING": 16, - "ATOM_CATEGORY": 1, - "ATOM_LINK": 2, - "ATOM_TITLE": 3, - "ATOM_CONTENT": 4, - "ATOM_SUMMARY": 5, - "ATOM_AUTHOR": 6, - "GD_WHEN": 7, - "GD_EMAIL": 8, - "GEORSS_POINT": 9, - "GD_IM": 10, - "GD_PHONENUMBER": 11, - "GD_POSTALADDRESS": 12, - "GD_RATING": 13, - "BLOBKEY": 17, - "ENTITY_PROTO": 19, - "INDEX_VALUE": 18, -} - -func (x Property_Meaning) Enum() *Property_Meaning { - p := new(Property_Meaning) - *p = x - return p -} -func (x Property_Meaning) String() string { - return proto.EnumName(Property_Meaning_name, int32(x)) -} -func (x *Property_Meaning) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning") - if err != nil { - return err - } - *x = Property_Meaning(value) - return nil -} - -type Property_FtsTokenizationOption int32 - -const ( - Property_HTML Property_FtsTokenizationOption = 1 - Property_ATOM Property_FtsTokenizationOption = 2 -) - -var Property_FtsTokenizationOption_name = map[int32]string{ - 1: "HTML", - 2: "ATOM", -} -var Property_FtsTokenizationOption_value = map[string]int32{ - "HTML": 1, - "ATOM": 2, -} - -func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption { - p := new(Property_FtsTokenizationOption) - *p = x - return p -} -func (x Property_FtsTokenizationOption) String() string { - return proto.EnumName(Property_FtsTokenizationOption_name, int32(x)) -} -func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, 
"Property_FtsTokenizationOption") - if err != nil { - return err - } - *x = Property_FtsTokenizationOption(value) - return nil -} - -type EntityProto_Kind int32 - -const ( - EntityProto_GD_CONTACT EntityProto_Kind = 1 - EntityProto_GD_EVENT EntityProto_Kind = 2 - EntityProto_GD_MESSAGE EntityProto_Kind = 3 -) - -var EntityProto_Kind_name = map[int32]string{ - 1: "GD_CONTACT", - 2: "GD_EVENT", - 3: "GD_MESSAGE", -} -var EntityProto_Kind_value = map[string]int32{ - "GD_CONTACT": 1, - "GD_EVENT": 2, - "GD_MESSAGE": 3, -} - -func (x EntityProto_Kind) Enum() *EntityProto_Kind { - p := new(EntityProto_Kind) - *p = x - return p -} -func (x EntityProto_Kind) String() string { - return proto.EnumName(EntityProto_Kind_name, int32(x)) -} -func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind") - if err != nil { - return err - } - *x = EntityProto_Kind(value) - return nil -} - -type Index_Property_Direction int32 - -const ( - Index_Property_ASCENDING Index_Property_Direction = 1 - Index_Property_DESCENDING Index_Property_Direction = 2 -) - -var Index_Property_Direction_name = map[int32]string{ - 1: "ASCENDING", - 2: "DESCENDING", -} -var Index_Property_Direction_value = map[string]int32{ - "ASCENDING": 1, - "DESCENDING": 2, -} - -func (x Index_Property_Direction) Enum() *Index_Property_Direction { - p := new(Index_Property_Direction) - *p = x - return p -} -func (x Index_Property_Direction) String() string { - return proto.EnumName(Index_Property_Direction_name, int32(x)) -} -func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction") - if err != nil { - return err - } - *x = Index_Property_Direction(value) - return nil -} - -type CompositeIndex_State int32 - -const ( - CompositeIndex_WRITE_ONLY CompositeIndex_State = 1 - CompositeIndex_READ_WRITE CompositeIndex_State 
= 2 - CompositeIndex_DELETED CompositeIndex_State = 3 - CompositeIndex_ERROR CompositeIndex_State = 4 -) - -var CompositeIndex_State_name = map[int32]string{ - 1: "WRITE_ONLY", - 2: "READ_WRITE", - 3: "DELETED", - 4: "ERROR", -} -var CompositeIndex_State_value = map[string]int32{ - "WRITE_ONLY": 1, - "READ_WRITE": 2, - "DELETED": 3, - "ERROR": 4, -} - -func (x CompositeIndex_State) Enum() *CompositeIndex_State { - p := new(CompositeIndex_State) - *p = x - return p -} -func (x CompositeIndex_State) String() string { - return proto.EnumName(CompositeIndex_State_name, int32(x)) -} -func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State") - if err != nil { - return err - } - *x = CompositeIndex_State(value) - return nil -} - -type Snapshot_Status int32 - -const ( - Snapshot_INACTIVE Snapshot_Status = 0 - Snapshot_ACTIVE Snapshot_Status = 1 -) - -var Snapshot_Status_name = map[int32]string{ - 0: "INACTIVE", - 1: "ACTIVE", -} -var Snapshot_Status_value = map[string]int32{ - "INACTIVE": 0, - "ACTIVE": 1, -} - -func (x Snapshot_Status) Enum() *Snapshot_Status { - p := new(Snapshot_Status) - *p = x - return p -} -func (x Snapshot_Status) String() string { - return proto.EnumName(Snapshot_Status_name, int32(x)) -} -func (x *Snapshot_Status) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status") - if err != nil { - return err - } - *x = Snapshot_Status(value) - return nil -} - -type Query_Hint int32 - -const ( - Query_ORDER_FIRST Query_Hint = 1 - Query_ANCESTOR_FIRST Query_Hint = 2 - Query_FILTER_FIRST Query_Hint = 3 -) - -var Query_Hint_name = map[int32]string{ - 1: "ORDER_FIRST", - 2: "ANCESTOR_FIRST", - 3: "FILTER_FIRST", -} -var Query_Hint_value = map[string]int32{ - "ORDER_FIRST": 1, - "ANCESTOR_FIRST": 2, - "FILTER_FIRST": 3, -} - -func (x Query_Hint) Enum() *Query_Hint { - p := new(Query_Hint) - 
*p = x - return p -} -func (x Query_Hint) String() string { - return proto.EnumName(Query_Hint_name, int32(x)) -} -func (x *Query_Hint) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint") - if err != nil { - return err - } - *x = Query_Hint(value) - return nil -} - -type Query_Filter_Operator int32 - -const ( - Query_Filter_LESS_THAN Query_Filter_Operator = 1 - Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2 - Query_Filter_GREATER_THAN Query_Filter_Operator = 3 - Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4 - Query_Filter_EQUAL Query_Filter_Operator = 5 - Query_Filter_IN Query_Filter_Operator = 6 - Query_Filter_EXISTS Query_Filter_Operator = 7 -) - -var Query_Filter_Operator_name = map[int32]string{ - 1: "LESS_THAN", - 2: "LESS_THAN_OR_EQUAL", - 3: "GREATER_THAN", - 4: "GREATER_THAN_OR_EQUAL", - 5: "EQUAL", - 6: "IN", - 7: "EXISTS", -} -var Query_Filter_Operator_value = map[string]int32{ - "LESS_THAN": 1, - "LESS_THAN_OR_EQUAL": 2, - "GREATER_THAN": 3, - "GREATER_THAN_OR_EQUAL": 4, - "EQUAL": 5, - "IN": 6, - "EXISTS": 7, -} - -func (x Query_Filter_Operator) Enum() *Query_Filter_Operator { - p := new(Query_Filter_Operator) - *p = x - return p -} -func (x Query_Filter_Operator) String() string { - return proto.EnumName(Query_Filter_Operator_name, int32(x)) -} -func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator") - if err != nil { - return err - } - *x = Query_Filter_Operator(value) - return nil -} - -type Query_Order_Direction int32 - -const ( - Query_Order_ASCENDING Query_Order_Direction = 1 - Query_Order_DESCENDING Query_Order_Direction = 2 -) - -var Query_Order_Direction_name = map[int32]string{ - 1: "ASCENDING", - 2: "DESCENDING", -} -var Query_Order_Direction_value = map[string]int32{ - "ASCENDING": 1, - "DESCENDING": 2, -} - -func (x Query_Order_Direction) Enum() 
*Query_Order_Direction { - p := new(Query_Order_Direction) - *p = x - return p -} -func (x Query_Order_Direction) String() string { - return proto.EnumName(Query_Order_Direction_name, int32(x)) -} -func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction") - if err != nil { - return err - } - *x = Query_Order_Direction(value) - return nil -} - -type Error_ErrorCode int32 - -const ( - Error_BAD_REQUEST Error_ErrorCode = 1 - Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2 - Error_INTERNAL_ERROR Error_ErrorCode = 3 - Error_NEED_INDEX Error_ErrorCode = 4 - Error_TIMEOUT Error_ErrorCode = 5 - Error_PERMISSION_DENIED Error_ErrorCode = 6 - Error_BIGTABLE_ERROR Error_ErrorCode = 7 - Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8 - Error_CAPABILITY_DISABLED Error_ErrorCode = 9 - Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10 - Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11 -) - -var Error_ErrorCode_name = map[int32]string{ - 1: "BAD_REQUEST", - 2: "CONCURRENT_TRANSACTION", - 3: "INTERNAL_ERROR", - 4: "NEED_INDEX", - 5: "TIMEOUT", - 6: "PERMISSION_DENIED", - 7: "BIGTABLE_ERROR", - 8: "COMMITTED_BUT_STILL_APPLYING", - 9: "CAPABILITY_DISABLED", - 10: "TRY_ALTERNATE_BACKEND", - 11: "SAFE_TIME_TOO_OLD", -} -var Error_ErrorCode_value = map[string]int32{ - "BAD_REQUEST": 1, - "CONCURRENT_TRANSACTION": 2, - "INTERNAL_ERROR": 3, - "NEED_INDEX": 4, - "TIMEOUT": 5, - "PERMISSION_DENIED": 6, - "BIGTABLE_ERROR": 7, - "COMMITTED_BUT_STILL_APPLYING": 8, - "CAPABILITY_DISABLED": 9, - "TRY_ALTERNATE_BACKEND": 10, - "SAFE_TIME_TOO_OLD": 11, -} - -func (x Error_ErrorCode) Enum() *Error_ErrorCode { - p := new(Error_ErrorCode) - *p = x - return p -} -func (x Error_ErrorCode) String() string { - return proto.EnumName(Error_ErrorCode_name, int32(x)) -} -func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, 
"Error_ErrorCode") - if err != nil { - return err - } - *x = Error_ErrorCode(value) - return nil -} - -type PutRequest_AutoIdPolicy int32 - -const ( - PutRequest_CURRENT PutRequest_AutoIdPolicy = 0 - PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1 -) - -var PutRequest_AutoIdPolicy_name = map[int32]string{ - 0: "CURRENT", - 1: "SEQUENTIAL", -} -var PutRequest_AutoIdPolicy_value = map[string]int32{ - "CURRENT": 0, - "SEQUENTIAL": 1, -} - -func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy { - p := new(PutRequest_AutoIdPolicy) - *p = x - return p -} -func (x PutRequest_AutoIdPolicy) String() string { - return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x)) -} -func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy") - if err != nil { - return err - } - *x = PutRequest_AutoIdPolicy(value) - return nil -} - -type Action struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Action) Reset() { *m = Action{} } -func (m *Action) String() string { return proto.CompactTextString(m) } -func (*Action) ProtoMessage() {} - -type PropertyValue struct { - Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"` - BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"` - StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"` - Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"` - Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"` - Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue) Reset() { *m = 
PropertyValue{} } -func (m *PropertyValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue) ProtoMessage() {} - -func (m *PropertyValue) GetInt64Value() int64 { - if m != nil && m.Int64Value != nil { - return *m.Int64Value - } - return 0 -} - -func (m *PropertyValue) GetBooleanValue() bool { - if m != nil && m.BooleanValue != nil { - return *m.BooleanValue - } - return false -} - -func (m *PropertyValue) GetStringValue() string { - if m != nil && m.StringValue != nil { - return *m.StringValue - } - return "" -} - -func (m *PropertyValue) GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue { - if m != nil { - return m.Pointvalue - } - return nil -} - -func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue { - if m != nil { - return m.Uservalue - } - return nil -} - -func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue { - if m != nil { - return m.Referencevalue - } - return nil -} - -type PropertyValue_PointValue struct { - X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"` - Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} } -func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_PointValue) ProtoMessage() {} - -func (m *PropertyValue_PointValue) GetX() float64 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -func (m *PropertyValue_PointValue) GetY() float64 { - if m != nil && m.Y != nil { - return *m.Y - } - return 0 -} - -type PropertyValue_UserValue struct { - Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"` - AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"` - Nickname *string 
`protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"` - FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"` - FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} } -func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_UserValue) ProtoMessage() {} - -func (m *PropertyValue_UserValue) GetEmail() string { - if m != nil && m.Email != nil { - return *m.Email - } - return "" -} - -func (m *PropertyValue_UserValue) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *PropertyValue_UserValue) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *PropertyValue_UserValue) GetFederatedIdentity() string { - if m != nil && m.FederatedIdentity != nil { - return *m.FederatedIdentity - } - return "" -} - -func (m *PropertyValue_UserValue) GetFederatedProvider() string { - if m != nil && m.FederatedProvider != nil { - return *m.FederatedProvider - } - return "" -} - -type PropertyValue_ReferenceValue struct { - App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` - Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} } -func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_ReferenceValue) ProtoMessage() {} - -func (m *PropertyValue_ReferenceValue) GetApp() string { - if m != nil && m.App != nil { - 
return *m.App - } - return "" -} - -func (m *PropertyValue_ReferenceValue) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement { - if m != nil { - return m.Pathelement - } - return nil -} - -type PropertyValue_ReferenceValue_PathElement struct { - Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"` - Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"` - Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_ReferenceValue_PathElement) Reset() { - *m = PropertyValue_ReferenceValue_PathElement{} -} -func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {} - -func (m *PropertyValue_ReferenceValue_PathElement) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *PropertyValue_ReferenceValue_PathElement) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type Property struct { - Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"` - MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"` - Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` - Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"` - Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"` - Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"` - FtsTokenizationOption *Property_FtsTokenizationOption 
`protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"` - Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Property) Reset() { *m = Property{} } -func (m *Property) String() string { return proto.CompactTextString(m) } -func (*Property) ProtoMessage() {} - -const Default_Property_Meaning Property_Meaning = Property_NO_MEANING -const Default_Property_Searchable bool = false -const Default_Property_Locale string = "en" - -func (m *Property) GetMeaning() Property_Meaning { - if m != nil && m.Meaning != nil { - return *m.Meaning - } - return Default_Property_Meaning -} - -func (m *Property) GetMeaningUri() string { - if m != nil && m.MeaningUri != nil { - return *m.MeaningUri - } - return "" -} - -func (m *Property) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Property) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -func (m *Property) GetMultiple() bool { - if m != nil && m.Multiple != nil { - return *m.Multiple - } - return false -} - -func (m *Property) GetSearchable() bool { - if m != nil && m.Searchable != nil { - return *m.Searchable - } - return Default_Property_Searchable -} - -func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption { - if m != nil && m.FtsTokenizationOption != nil { - return *m.FtsTokenizationOption - } - return Property_HTML -} - -func (m *Property) GetLocale() string { - if m != nil && m.Locale != nil { - return *m.Locale - } - return Default_Property_Locale -} - -type Path struct { - Element []*Path_Element `protobuf:"group,1,rep,name=Element" json:"element,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Path) Reset() { *m = Path{} } -func (m *Path) String() string { return proto.CompactTextString(m) } -func (*Path) ProtoMessage() {} - -func (m 
*Path) GetElement() []*Path_Element { - if m != nil { - return m.Element - } - return nil -} - -type Path_Element struct { - Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"` - Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` - Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Path_Element) Reset() { *m = Path_Element{} } -func (m *Path_Element) String() string { return proto.CompactTextString(m) } -func (*Path_Element) ProtoMessage() {} - -func (m *Path_Element) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *Path_Element) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *Path_Element) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type Reference struct { - App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` - Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Reference) Reset() { *m = Reference{} } -func (m *Reference) String() string { return proto.CompactTextString(m) } -func (*Reference) ProtoMessage() {} - -func (m *Reference) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Reference) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *Reference) GetPath() *Path { - if m != nil { - return m.Path - } - return nil -} - -type User struct { - Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` - AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"` - Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"` - FederatedIdentity *string 
`protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"` - FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} - -func (m *User) GetEmail() string { - if m != nil && m.Email != nil { - return *m.Email - } - return "" -} - -func (m *User) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *User) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *User) GetFederatedIdentity() string { - if m != nil && m.FederatedIdentity != nil { - return *m.FederatedIdentity - } - return "" -} - -func (m *User) GetFederatedProvider() string { - if m != nil && m.FederatedProvider != nil { - return *m.FederatedProvider - } - return "" -} - -type EntityProto struct { - Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"` - EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"` - Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"` - Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"` - KindUri *string `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"` - Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` - RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"` - Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *EntityProto) Reset() { *m = EntityProto{} } -func (m *EntityProto) String() string { return proto.CompactTextString(m) } -func (*EntityProto) ProtoMessage() {} - -func (m *EntityProto) GetKey() 
*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *EntityProto) GetEntityGroup() *Path { - if m != nil { - return m.EntityGroup - } - return nil -} - -func (m *EntityProto) GetOwner() *User { - if m != nil { - return m.Owner - } - return nil -} - -func (m *EntityProto) GetKind() EntityProto_Kind { - if m != nil && m.Kind != nil { - return *m.Kind - } - return EntityProto_GD_CONTACT -} - -func (m *EntityProto) GetKindUri() string { - if m != nil && m.KindUri != nil { - return *m.KindUri - } - return "" -} - -func (m *EntityProto) GetProperty() []*Property { - if m != nil { - return m.Property - } - return nil -} - -func (m *EntityProto) GetRawProperty() []*Property { - if m != nil { - return m.RawProperty - } - return nil -} - -func (m *EntityProto) GetRank() int32 { - if m != nil && m.Rank != nil { - return *m.Rank - } - return 0 -} - -type CompositeProperty struct { - IndexId *int64 `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"` - Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompositeProperty) Reset() { *m = CompositeProperty{} } -func (m *CompositeProperty) String() string { return proto.CompactTextString(m) } -func (*CompositeProperty) ProtoMessage() {} - -func (m *CompositeProperty) GetIndexId() int64 { - if m != nil && m.IndexId != nil { - return *m.IndexId - } - return 0 -} - -func (m *CompositeProperty) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -type Index struct { - EntityType *string `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"` - Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"` - Property []*Index_Property `protobuf:"group,2,rep,name=Property" json:"property,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Index) Reset() { *m = Index{} } -func (m *Index) String() string { return proto.CompactTextString(m) } -func (*Index) 
ProtoMessage() {} - -func (m *Index) GetEntityType() string { - if m != nil && m.EntityType != nil { - return *m.EntityType - } - return "" -} - -func (m *Index) GetAncestor() bool { - if m != nil && m.Ancestor != nil { - return *m.Ancestor - } - return false -} - -func (m *Index) GetProperty() []*Index_Property { - if m != nil { - return m.Property - } - return nil -} - -type Index_Property struct { - Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` - Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Index_Property) Reset() { *m = Index_Property{} } -func (m *Index_Property) String() string { return proto.CompactTextString(m) } -func (*Index_Property) ProtoMessage() {} - -const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING - -func (m *Index_Property) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Index_Property) GetDirection() Index_Property_Direction { - if m != nil && m.Direction != nil { - return *m.Direction - } - return Default_Index_Property_Direction -} - -type CompositeIndex struct { - AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` - Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"` - Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"` - State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"` - OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompositeIndex) Reset() { *m = CompositeIndex{} } -func (m *CompositeIndex) String() string { return proto.CompactTextString(m) } -func (*CompositeIndex) ProtoMessage() {} - -const 
Default_CompositeIndex_OnlyUseIfRequired bool = false - -func (m *CompositeIndex) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *CompositeIndex) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *CompositeIndex) GetDefinition() *Index { - if m != nil { - return m.Definition - } - return nil -} - -func (m *CompositeIndex) GetState() CompositeIndex_State { - if m != nil && m.State != nil { - return *m.State - } - return CompositeIndex_WRITE_ONLY -} - -func (m *CompositeIndex) GetOnlyUseIfRequired() bool { - if m != nil && m.OnlyUseIfRequired != nil { - return *m.OnlyUseIfRequired - } - return Default_CompositeIndex_OnlyUseIfRequired -} - -type IndexPostfix struct { - IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"` - Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` - Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *IndexPostfix) Reset() { *m = IndexPostfix{} } -func (m *IndexPostfix) String() string { return proto.CompactTextString(m) } -func (*IndexPostfix) ProtoMessage() {} - -const Default_IndexPostfix_Before bool = true - -func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue { - if m != nil { - return m.IndexValue - } - return nil -} - -func (m *IndexPostfix) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *IndexPostfix) GetBefore() bool { - if m != nil && m.Before != nil { - return *m.Before - } - return Default_IndexPostfix_Before -} - -type IndexPostfix_IndexValue struct { - PropertyName *string `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"` - Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *IndexPostfix_IndexValue) Reset() { *m = 
IndexPostfix_IndexValue{} } -func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) } -func (*IndexPostfix_IndexValue) ProtoMessage() {} - -func (m *IndexPostfix_IndexValue) GetPropertyName() string { - if m != nil && m.PropertyName != nil { - return *m.PropertyName - } - return "" -} - -func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -type IndexPosition struct { - Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` - Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *IndexPosition) Reset() { *m = IndexPosition{} } -func (m *IndexPosition) String() string { return proto.CompactTextString(m) } -func (*IndexPosition) ProtoMessage() {} - -const Default_IndexPosition_Before bool = true - -func (m *IndexPosition) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *IndexPosition) GetBefore() bool { - if m != nil && m.Before != nil { - return *m.Before - } - return Default_IndexPosition_Before -} - -type Snapshot struct { - Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} - -func (m *Snapshot) GetTs() int64 { - if m != nil && m.Ts != nil { - return *m.Ts - } - return 0 -} - -type InternalHeader struct { - Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *InternalHeader) Reset() { *m = InternalHeader{} } -func (m *InternalHeader) String() string { return proto.CompactTextString(m) } -func (*InternalHeader) ProtoMessage() {} - -func (m *InternalHeader) GetQos() string { - if m != nil && m.Qos != nil { - return *m.Qos - } - return "" -} - -type Transaction 
struct { - Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` - Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"` - App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"` - MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Transaction) Reset() { *m = Transaction{} } -func (m *Transaction) String() string { return proto.CompactTextString(m) } -func (*Transaction) ProtoMessage() {} - -const Default_Transaction_MarkChanges bool = false - -func (m *Transaction) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *Transaction) GetHandle() uint64 { - if m != nil && m.Handle != nil { - return *m.Handle - } - return 0 -} - -func (m *Transaction) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Transaction) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_Transaction_MarkChanges -} - -type Query struct { - Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"` - App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"` - Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` - Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"` - Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter" json:"filter,omitempty"` - SearchQuery *string `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"` - Order []*Query_Order `protobuf:"group,9,rep,name=Order" json:"order,omitempty"` - Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"` - Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"` - Offset *int32 
`protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"` - Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"` - CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` - EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"` - RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"` - KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"` - Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"` - Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"` - FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"` - Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"` - PropertyName []string `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"` - GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"` - Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"` - MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"` - SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"` - PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Query) Reset() { *m = Query{} } -func (m *Query) String() string { return proto.CompactTextString(m) } -func (*Query) ProtoMessage() {} - -const Default_Query_Offset int32 = 0 -const Default_Query_RequirePerfectPlan bool = false 
-const Default_Query_KeysOnly bool = false -const Default_Query_Compile bool = false -const Default_Query_PersistOffset bool = false - -func (m *Query) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *Query) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Query) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *Query) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *Query) GetAncestor() *Reference { - if m != nil { - return m.Ancestor - } - return nil -} - -func (m *Query) GetFilter() []*Query_Filter { - if m != nil { - return m.Filter - } - return nil -} - -func (m *Query) GetSearchQuery() string { - if m != nil && m.SearchQuery != nil { - return *m.SearchQuery - } - return "" -} - -func (m *Query) GetOrder() []*Query_Order { - if m != nil { - return m.Order - } - return nil -} - -func (m *Query) GetHint() Query_Hint { - if m != nil && m.Hint != nil { - return *m.Hint - } - return Query_ORDER_FIRST -} - -func (m *Query) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *Query) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_Query_Offset -} - -func (m *Query) GetLimit() int32 { - if m != nil && m.Limit != nil { - return *m.Limit - } - return 0 -} - -func (m *Query) GetCompiledCursor() *CompiledCursor { - if m != nil { - return m.CompiledCursor - } - return nil -} - -func (m *Query) GetEndCompiledCursor() *CompiledCursor { - if m != nil { - return m.EndCompiledCursor - } - return nil -} - -func (m *Query) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *Query) GetRequirePerfectPlan() bool { - if m != nil && m.RequirePerfectPlan != nil { - return *m.RequirePerfectPlan - } - return 
Default_Query_RequirePerfectPlan -} - -func (m *Query) GetKeysOnly() bool { - if m != nil && m.KeysOnly != nil { - return *m.KeysOnly - } - return Default_Query_KeysOnly -} - -func (m *Query) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *Query) GetCompile() bool { - if m != nil && m.Compile != nil { - return *m.Compile - } - return Default_Query_Compile -} - -func (m *Query) GetFailoverMs() int64 { - if m != nil && m.FailoverMs != nil { - return *m.FailoverMs - } - return 0 -} - -func (m *Query) GetStrong() bool { - if m != nil && m.Strong != nil { - return *m.Strong - } - return false -} - -func (m *Query) GetPropertyName() []string { - if m != nil { - return m.PropertyName - } - return nil -} - -func (m *Query) GetGroupByPropertyName() []string { - if m != nil { - return m.GroupByPropertyName - } - return nil -} - -func (m *Query) GetDistinct() bool { - if m != nil && m.Distinct != nil { - return *m.Distinct - } - return false -} - -func (m *Query) GetMinSafeTimeSeconds() int64 { - if m != nil && m.MinSafeTimeSeconds != nil { - return *m.MinSafeTimeSeconds - } - return 0 -} - -func (m *Query) GetSafeReplicaName() []string { - if m != nil { - return m.SafeReplicaName - } - return nil -} - -func (m *Query) GetPersistOffset() bool { - if m != nil && m.PersistOffset != nil { - return *m.PersistOffset - } - return Default_Query_PersistOffset -} - -type Query_Filter struct { - Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"` - Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Query_Filter) Reset() { *m = Query_Filter{} } -func (m *Query_Filter) String() string { return proto.CompactTextString(m) } -func (*Query_Filter) ProtoMessage() {} - -func (m *Query_Filter) GetOp() Query_Filter_Operator { - if m != nil && m.Op != nil { - return *m.Op - } - return 
Query_Filter_LESS_THAN -} - -func (m *Query_Filter) GetProperty() []*Property { - if m != nil { - return m.Property - } - return nil -} - -type Query_Order struct { - Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"` - Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Query_Order) Reset() { *m = Query_Order{} } -func (m *Query_Order) String() string { return proto.CompactTextString(m) } -func (*Query_Order) ProtoMessage() {} - -const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING - -func (m *Query_Order) GetProperty() string { - if m != nil && m.Property != nil { - return *m.Property - } - return "" -} - -func (m *Query_Order) GetDirection() Query_Order_Direction { - if m != nil && m.Direction != nil { - return *m.Direction - } - return Default_Query_Order_Direction -} - -type CompiledQuery struct { - Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"` - Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"` - IndexDef *Index `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"` - Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` - Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` - KeysOnly *bool `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"` - PropertyName []string `protobuf:"bytes,24,rep,name=property_name" json:"property_name,omitempty"` - DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"` - Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery) Reset() { *m 
= CompiledQuery{} } -func (m *CompiledQuery) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery) ProtoMessage() {} - -const Default_CompiledQuery_Offset int32 = 0 - -func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan { - if m != nil { - return m.Primaryscan - } - return nil -} - -func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan { - if m != nil { - return m.Mergejoinscan - } - return nil -} - -func (m *CompiledQuery) GetIndexDef() *Index { - if m != nil { - return m.IndexDef - } - return nil -} - -func (m *CompiledQuery) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_CompiledQuery_Offset -} - -func (m *CompiledQuery) GetLimit() int32 { - if m != nil && m.Limit != nil { - return *m.Limit - } - return 0 -} - -func (m *CompiledQuery) GetKeysOnly() bool { - if m != nil && m.KeysOnly != nil { - return *m.KeysOnly - } - return false -} - -func (m *CompiledQuery) GetPropertyName() []string { - if m != nil { - return m.PropertyName - } - return nil -} - -func (m *CompiledQuery) GetDistinctInfixSize() int32 { - if m != nil && m.DistinctInfixSize != nil { - return *m.DistinctInfixSize - } - return 0 -} - -func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter { - if m != nil { - return m.Entityfilter - } - return nil -} - -type CompiledQuery_PrimaryScan struct { - IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"` - StartKey *string `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"` - StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"` - EndKey *string `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"` - EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"` - StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"` - EndPostfixValue 
[]string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"` - EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} } -func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_PrimaryScan) ProtoMessage() {} - -func (m *CompiledQuery_PrimaryScan) GetIndexName() string { - if m != nil && m.IndexName != nil { - return *m.IndexName - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetStartKey() string { - if m != nil && m.StartKey != nil { - return *m.StartKey - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool { - if m != nil && m.StartInclusive != nil { - return *m.StartInclusive - } - return false -} - -func (m *CompiledQuery_PrimaryScan) GetEndKey() string { - if m != nil && m.EndKey != nil { - return *m.EndKey - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool { - if m != nil && m.EndInclusive != nil { - return *m.EndInclusive - } - return false -} - -func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string { - if m != nil { - return m.StartPostfixValue - } - return nil -} - -func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string { - if m != nil { - return m.EndPostfixValue - } - return nil -} - -func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 { - if m != nil && m.EndUnappliedLogTimestampUs != nil { - return *m.EndUnappliedLogTimestampUs - } - return 0 -} - -type CompiledQuery_MergeJoinScan struct { - IndexName *string `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"` - PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"` - ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,def=0" 
json:"value_prefix,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} } -func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_MergeJoinScan) ProtoMessage() {} - -const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false - -func (m *CompiledQuery_MergeJoinScan) GetIndexName() string { - if m != nil && m.IndexName != nil { - return *m.IndexName - } - return "" -} - -func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string { - if m != nil { - return m.PrefixValue - } - return nil -} - -func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool { - if m != nil && m.ValuePrefix != nil { - return *m.ValuePrefix - } - return Default_CompiledQuery_MergeJoinScan_ValuePrefix -} - -type CompiledQuery_EntityFilter struct { - Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"` - Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"` - Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} } -func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_EntityFilter) ProtoMessage() {} - -const Default_CompiledQuery_EntityFilter_Distinct bool = false - -func (m *CompiledQuery_EntityFilter) GetDistinct() bool { - if m != nil && m.Distinct != nil { - return *m.Distinct - } - return Default_CompiledQuery_EntityFilter_Distinct -} - -func (m *CompiledQuery_EntityFilter) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference { - if m != nil { - return m.Ancestor - } - return nil -} - -type CompiledCursor struct { - Position *CompiledCursor_Position 
`protobuf:"group,2,opt,name=Position" json:"position,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledCursor) Reset() { *m = CompiledCursor{} } -func (m *CompiledCursor) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor) ProtoMessage() {} - -func (m *CompiledCursor) GetPosition() *CompiledCursor_Position { - if m != nil { - return m.Position - } - return nil -} - -type CompiledCursor_Position struct { - StartKey *string `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"` - Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"` - Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"` - StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} } -func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor_Position) ProtoMessage() {} - -const Default_CompiledCursor_Position_StartInclusive bool = true - -func (m *CompiledCursor_Position) GetStartKey() string { - if m != nil && m.StartKey != nil { - return *m.StartKey - } - return "" -} - -func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue { - if m != nil { - return m.Indexvalue - } - return nil -} - -func (m *CompiledCursor_Position) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *CompiledCursor_Position) GetStartInclusive() bool { - if m != nil && m.StartInclusive != nil { - return *m.StartInclusive - } - return Default_CompiledCursor_Position_StartInclusive -} - -type CompiledCursor_Position_IndexValue struct { - Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"` - Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"` - 
XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} } -func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor_Position_IndexValue) ProtoMessage() {} - -func (m *CompiledCursor_Position_IndexValue) GetProperty() string { - if m != nil && m.Property != nil { - return *m.Property - } - return "" -} - -func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -type Cursor struct { - Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"` - App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Cursor) Reset() { *m = Cursor{} } -func (m *Cursor) String() string { return proto.CompactTextString(m) } -func (*Cursor) ProtoMessage() {} - -func (m *Cursor) GetCursor() uint64 { - if m != nil && m.Cursor != nil { - return *m.Cursor - } - return 0 -} - -func (m *Cursor) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -type Error struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Error) Reset() { *m = Error{} } -func (m *Error) String() string { return proto.CompactTextString(m) } -func (*Error) ProtoMessage() {} - -type Cost struct { - IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes" json:"index_writes,omitempty"` - IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"` - EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"` - EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"` - Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" json:"commitcost,omitempty"` - ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta" 
json:"approximate_storage_delta,omitempty"` - IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Cost) Reset() { *m = Cost{} } -func (m *Cost) String() string { return proto.CompactTextString(m) } -func (*Cost) ProtoMessage() {} - -func (m *Cost) GetIndexWrites() int32 { - if m != nil && m.IndexWrites != nil { - return *m.IndexWrites - } - return 0 -} - -func (m *Cost) GetIndexWriteBytes() int32 { - if m != nil && m.IndexWriteBytes != nil { - return *m.IndexWriteBytes - } - return 0 -} - -func (m *Cost) GetEntityWrites() int32 { - if m != nil && m.EntityWrites != nil { - return *m.EntityWrites - } - return 0 -} - -func (m *Cost) GetEntityWriteBytes() int32 { - if m != nil && m.EntityWriteBytes != nil { - return *m.EntityWriteBytes - } - return 0 -} - -func (m *Cost) GetCommitcost() *Cost_CommitCost { - if m != nil { - return m.Commitcost - } - return nil -} - -func (m *Cost) GetApproximateStorageDelta() int32 { - if m != nil && m.ApproximateStorageDelta != nil { - return *m.ApproximateStorageDelta - } - return 0 -} - -func (m *Cost) GetIdSequenceUpdates() int32 { - if m != nil && m.IdSequenceUpdates != nil { - return *m.IdSequenceUpdates - } - return 0 -} - -type Cost_CommitCost struct { - RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"` - RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} } -func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) } -func (*Cost_CommitCost) ProtoMessage() {} - -func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 { - if m != nil && m.RequestedEntityPuts != nil { - return *m.RequestedEntityPuts - } - return 0 -} - -func (m *Cost_CommitCost) 
GetRequestedEntityDeletes() int32 { - if m != nil && m.RequestedEntityDeletes != nil { - return *m.RequestedEntityDeletes - } - return 0 -} - -type GetRequest struct { - Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"` - Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` - FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"` - Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"` - AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetRequest) Reset() { *m = GetRequest{} } -func (m *GetRequest) String() string { return proto.CompactTextString(m) } -func (*GetRequest) ProtoMessage() {} - -const Default_GetRequest_AllowDeferred bool = false - -func (m *GetRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *GetRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *GetRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *GetRequest) GetFailoverMs() int64 { - if m != nil && m.FailoverMs != nil { - return *m.FailoverMs - } - return 0 -} - -func (m *GetRequest) GetStrong() bool { - if m != nil && m.Strong != nil { - return *m.Strong - } - return false -} - -func (m *GetRequest) GetAllowDeferred() bool { - if m != nil && m.AllowDeferred != nil { - return *m.AllowDeferred - } - return Default_GetRequest_AllowDeferred -} - -type GetResponse struct { - Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity" json:"entity,omitempty"` - Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"` - InOrder *bool `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"` - 
XXX_unrecognized []byte `json:"-"` -} - -func (m *GetResponse) Reset() { *m = GetResponse{} } -func (m *GetResponse) String() string { return proto.CompactTextString(m) } -func (*GetResponse) ProtoMessage() {} - -const Default_GetResponse_InOrder bool = true - -func (m *GetResponse) GetEntity() []*GetResponse_Entity { - if m != nil { - return m.Entity - } - return nil -} - -func (m *GetResponse) GetDeferred() []*Reference { - if m != nil { - return m.Deferred - } - return nil -} - -func (m *GetResponse) GetInOrder() bool { - if m != nil && m.InOrder != nil { - return *m.InOrder - } - return Default_GetResponse_InOrder -} - -type GetResponse_Entity struct { - Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"` - Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"` - Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} } -func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) } -func (*GetResponse_Entity) ProtoMessage() {} - -func (m *GetResponse_Entity) GetEntity() *EntityProto { - if m != nil { - return m.Entity - } - return nil -} - -func (m *GetResponse_Entity) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *GetResponse_Entity) GetVersion() int64 { - if m != nil && m.Version != nil { - return *m.Version - } - return 0 -} - -type PutRequest struct { - Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"` - Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"` - Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"` - Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` - Force *bool 
`protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` - MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PutRequest) Reset() { *m = PutRequest{} } -func (m *PutRequest) String() string { return proto.CompactTextString(m) } -func (*PutRequest) ProtoMessage() {} - -const Default_PutRequest_Trusted bool = false -const Default_PutRequest_Force bool = false -const Default_PutRequest_MarkChanges bool = false -const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT - -func (m *PutRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *PutRequest) GetEntity() []*EntityProto { - if m != nil { - return m.Entity - } - return nil -} - -func (m *PutRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *PutRequest) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *PutRequest) GetTrusted() bool { - if m != nil && m.Trusted != nil { - return *m.Trusted - } - return Default_PutRequest_Trusted -} - -func (m *PutRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return Default_PutRequest_Force -} - -func (m *PutRequest) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_PutRequest_MarkChanges -} - -func (m *PutRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy { - if m != nil && m.AutoIdPolicy != nil { - return *m.AutoIdPolicy - } - 
return Default_PutRequest_AutoIdPolicy -} - -type PutResponse struct { - Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"` - Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PutResponse) Reset() { *m = PutResponse{} } -func (m *PutResponse) String() string { return proto.CompactTextString(m) } -func (*PutResponse) ProtoMessage() {} - -func (m *PutResponse) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *PutResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *PutResponse) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type TouchRequest struct { - Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` - Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"` - Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TouchRequest) Reset() { *m = TouchRequest{} } -func (m *TouchRequest) String() string { return proto.CompactTextString(m) } -func (*TouchRequest) ProtoMessage() {} - -const Default_TouchRequest_Force bool = false - -func (m *TouchRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *TouchRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *TouchRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return 
Default_TouchRequest_Force -} - -func (m *TouchRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -type TouchResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TouchResponse) Reset() { *m = TouchResponse{} } -func (m *TouchResponse) String() string { return proto.CompactTextString(m) } -func (*TouchResponse) ProtoMessage() {} - -func (m *TouchResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -type DeleteRequest struct { - Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` - Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"` - Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"` - Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` - Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` - MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } -func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRequest) ProtoMessage() {} - -const Default_DeleteRequest_Trusted bool = false -const Default_DeleteRequest_Force bool = false -const Default_DeleteRequest_MarkChanges bool = false - -func (m *DeleteRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *DeleteRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *DeleteRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *DeleteRequest) GetTrusted() bool { - if m != nil && m.Trusted != nil { - return 
*m.Trusted - } - return Default_DeleteRequest_Trusted -} - -func (m *DeleteRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return Default_DeleteRequest_Force -} - -func (m *DeleteRequest) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_DeleteRequest_MarkChanges -} - -func (m *DeleteRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -type DeleteResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } -func (m *DeleteResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteResponse) ProtoMessage() {} - -func (m *DeleteResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *DeleteResponse) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type NextRequest struct { - Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"` - Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"` - Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` - Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"` - Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NextRequest) Reset() { *m = NextRequest{} } -func (m *NextRequest) String() string { return proto.CompactTextString(m) } -func (*NextRequest) ProtoMessage() {} - -const Default_NextRequest_Offset int32 = 0 -const Default_NextRequest_Compile bool = false - -func (m *NextRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *NextRequest) GetCursor() *Cursor { - if m != nil 
{ - return m.Cursor - } - return nil -} - -func (m *NextRequest) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *NextRequest) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_NextRequest_Offset -} - -func (m *NextRequest) GetCompile() bool { - if m != nil && m.Compile != nil { - return *m.Compile - } - return Default_NextRequest_Compile -} - -type QueryResult struct { - Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"` - Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"` - SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"` - MoreResults *bool `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"` - KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"` - IndexOnly *bool `protobuf:"varint,9,opt,name=index_only" json:"index_only,omitempty"` - SmallOps *bool `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"` - CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"` - CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` - Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"` - Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *QueryResult) Reset() { *m = QueryResult{} } -func (m *QueryResult) String() string { return proto.CompactTextString(m) } -func (*QueryResult) ProtoMessage() {} - -func (m *QueryResult) GetCursor() *Cursor { - if m != nil { - return m.Cursor - } - return nil -} - -func (m *QueryResult) GetResult() []*EntityProto { - if m != nil { - return m.Result - } - return nil -} - -func (m *QueryResult) GetSkippedResults() int32 { - if m != nil && m.SkippedResults != nil { - 
return *m.SkippedResults - } - return 0 -} - -func (m *QueryResult) GetMoreResults() bool { - if m != nil && m.MoreResults != nil { - return *m.MoreResults - } - return false -} - -func (m *QueryResult) GetKeysOnly() bool { - if m != nil && m.KeysOnly != nil { - return *m.KeysOnly - } - return false -} - -func (m *QueryResult) GetIndexOnly() bool { - if m != nil && m.IndexOnly != nil { - return *m.IndexOnly - } - return false -} - -func (m *QueryResult) GetSmallOps() bool { - if m != nil && m.SmallOps != nil { - return *m.SmallOps - } - return false -} - -func (m *QueryResult) GetCompiledQuery() *CompiledQuery { - if m != nil { - return m.CompiledQuery - } - return nil -} - -func (m *QueryResult) GetCompiledCursor() *CompiledCursor { - if m != nil { - return m.CompiledCursor - } - return nil -} - -func (m *QueryResult) GetIndex() []*CompositeIndex { - if m != nil { - return m.Index - } - return nil -} - -func (m *QueryResult) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type AllocateIdsRequest struct { - Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` - ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key" json:"model_key,omitempty"` - Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` - Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"` - Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } -func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } -func (*AllocateIdsRequest) ProtoMessage() {} - -func (m *AllocateIdsRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AllocateIdsRequest) GetModelKey() *Reference { - if m != nil { - return m.ModelKey - } - return nil -} - -func (m *AllocateIdsRequest) GetSize() int64 { - if m != nil && 
m.Size != nil { - return *m.Size - } - return 0 -} - -func (m *AllocateIdsRequest) GetMax() int64 { - if m != nil && m.Max != nil { - return *m.Max - } - return 0 -} - -func (m *AllocateIdsRequest) GetReserve() []*Reference { - if m != nil { - return m.Reserve - } - return nil -} - -type AllocateIdsResponse struct { - Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"` - End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"` - Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } -func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } -func (*AllocateIdsResponse) ProtoMessage() {} - -func (m *AllocateIdsResponse) GetStart() int64 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *AllocateIdsResponse) GetEnd() int64 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func (m *AllocateIdsResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -type CompositeIndices struct { - Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompositeIndices) Reset() { *m = CompositeIndices{} } -func (m *CompositeIndices) String() string { return proto.CompactTextString(m) } -func (*CompositeIndices) ProtoMessage() {} - -func (m *CompositeIndices) GetIndex() []*CompositeIndex { - if m != nil { - return m.Index - } - return nil -} - -type AddActionsRequest struct { - Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` - Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` - Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} } -func (m 
*AddActionsRequest) String() string { return proto.CompactTextString(m) } -func (*AddActionsRequest) ProtoMessage() {} - -func (m *AddActionsRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AddActionsRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *AddActionsRequest) GetAction() []*Action { - if m != nil { - return m.Action - } - return nil -} - -type AddActionsResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} } -func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) } -func (*AddActionsResponse) ProtoMessage() {} - -type BeginTransactionRequest struct { - Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` - App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` - AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } -func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } -func (*BeginTransactionRequest) ProtoMessage() {} - -const Default_BeginTransactionRequest_AllowMultipleEg bool = false - -func (m *BeginTransactionRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *BeginTransactionRequest) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *BeginTransactionRequest) GetAllowMultipleEg() bool { - if m != nil && m.AllowMultipleEg != nil { - return *m.AllowMultipleEg - } - return Default_BeginTransactionRequest_AllowMultipleEg -} - -type CommitResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version" 
json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CommitResponse) Reset() { *m = CommitResponse{} } -func (m *CommitResponse) String() string { return proto.CompactTextString(m) } -func (*CommitResponse) ProtoMessage() {} - -func (m *CommitResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *CommitResponse) GetVersion() []*CommitResponse_Version { - if m != nil { - return m.Version - } - return nil -} - -type CommitResponse_Version struct { - RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"` - Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} } -func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) } -func (*CommitResponse_Version) ProtoMessage() {} - -func (m *CommitResponse_Version) GetRootEntityKey() *Reference { - if m != nil { - return m.RootEntityKey - } - return nil -} - -func (m *CommitResponse_Version) GetVersion() int64 { - if m != nil && m.Version != nil { - return *m.Version - } - return 0 -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto deleted file mode 100644 index e76f126..0000000 --- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto +++ /dev/null @@ -1,541 +0,0 @@ -syntax = "proto2"; -option go_package = "datastore"; - -package appengine; - -message Action{} - -message PropertyValue { - optional int64 int64Value = 1; - optional bool booleanValue = 2; - optional string stringValue = 3; - optional double doubleValue = 4; - - optional group PointValue = 5 { - required double x = 6; - required double y = 7; - } - - optional group UserValue = 8 { - required string email = 9; - required string auth_domain 
= 10; - optional string nickname = 11; - optional string federated_identity = 21; - optional string federated_provider = 22; - } - - optional group ReferenceValue = 12 { - required string app = 13; - optional string name_space = 20; - repeated group PathElement = 14 { - required string type = 15; - optional int64 id = 16; - optional string name = 17; - } - } -} - -message Property { - enum Meaning { - NO_MEANING = 0; - BLOB = 14; - TEXT = 15; - BYTESTRING = 16; - - ATOM_CATEGORY = 1; - ATOM_LINK = 2; - ATOM_TITLE = 3; - ATOM_CONTENT = 4; - ATOM_SUMMARY = 5; - ATOM_AUTHOR = 6; - - GD_WHEN = 7; - GD_EMAIL = 8; - GEORSS_POINT = 9; - GD_IM = 10; - - GD_PHONENUMBER = 11; - GD_POSTALADDRESS = 12; - - GD_RATING = 13; - - BLOBKEY = 17; - ENTITY_PROTO = 19; - - INDEX_VALUE = 18; - }; - - optional Meaning meaning = 1 [default = NO_MEANING]; - optional string meaning_uri = 2; - - required string name = 3; - - required PropertyValue value = 5; - - required bool multiple = 4; - - optional bool searchable = 6 [default=false]; - - enum FtsTokenizationOption { - HTML = 1; - ATOM = 2; - } - - optional FtsTokenizationOption fts_tokenization_option = 8; - - optional string locale = 9 [default = "en"]; -} - -message Path { - repeated group Element = 1 { - required string type = 2; - optional int64 id = 3; - optional string name = 4; - } -} - -message Reference { - required string app = 13; - optional string name_space = 20; - required Path path = 14; -} - -message User { - required string email = 1; - required string auth_domain = 2; - optional string nickname = 3; - optional string federated_identity = 6; - optional string federated_provider = 7; -} - -message EntityProto { - required Reference key = 13; - required Path entity_group = 16; - optional User owner = 17; - - enum Kind { - GD_CONTACT = 1; - GD_EVENT = 2; - GD_MESSAGE = 3; - } - optional Kind kind = 4; - optional string kind_uri = 5; - - repeated Property property = 14; - repeated Property raw_property = 15; - - optional 
int32 rank = 18; -} - -message CompositeProperty { - required int64 index_id = 1; - repeated string value = 2; -} - -message Index { - required string entity_type = 1; - required bool ancestor = 5; - repeated group Property = 2 { - required string name = 3; - enum Direction { - ASCENDING = 1; - DESCENDING = 2; - } - optional Direction direction = 4 [default = ASCENDING]; - } -} - -message CompositeIndex { - required string app_id = 1; - required int64 id = 2; - required Index definition = 3; - - enum State { - WRITE_ONLY = 1; - READ_WRITE = 2; - DELETED = 3; - ERROR = 4; - } - required State state = 4; - - optional bool only_use_if_required = 6 [default = false]; -} - -message IndexPostfix { - message IndexValue { - required string property_name = 1; - required PropertyValue value = 2; - } - - repeated IndexValue index_value = 1; - - optional Reference key = 2; - - optional bool before = 3 [default=true]; -} - -message IndexPosition { - optional string key = 1; - - optional bool before = 2 [default=true]; -} - -message Snapshot { - enum Status { - INACTIVE = 0; - ACTIVE = 1; - } - - required int64 ts = 1; -} - -message InternalHeader { - optional string qos = 1; -} - -message Transaction { - optional InternalHeader header = 4; - required fixed64 handle = 1; - required string app = 2; - optional bool mark_changes = 3 [default = false]; -} - -message Query { - optional InternalHeader header = 39; - - required string app = 1; - optional string name_space = 29; - - optional string kind = 3; - optional Reference ancestor = 17; - - repeated group Filter = 4 { - enum Operator { - LESS_THAN = 1; - LESS_THAN_OR_EQUAL = 2; - GREATER_THAN = 3; - GREATER_THAN_OR_EQUAL = 4; - EQUAL = 5; - IN = 6; - EXISTS = 7; - } - - required Operator op = 6; - repeated Property property = 14; - } - - optional string search_query = 8; - - repeated group Order = 9 { - enum Direction { - ASCENDING = 1; - DESCENDING = 2; - } - - required string property = 10; - optional Direction direction = 11 
[default = ASCENDING]; - } - - enum Hint { - ORDER_FIRST = 1; - ANCESTOR_FIRST = 2; - FILTER_FIRST = 3; - } - optional Hint hint = 18; - - optional int32 count = 23; - - optional int32 offset = 12 [default = 0]; - - optional int32 limit = 16; - - optional CompiledCursor compiled_cursor = 30; - optional CompiledCursor end_compiled_cursor = 31; - - repeated CompositeIndex composite_index = 19; - - optional bool require_perfect_plan = 20 [default = false]; - - optional bool keys_only = 21 [default = false]; - - optional Transaction transaction = 22; - - optional bool compile = 25 [default = false]; - - optional int64 failover_ms = 26; - - optional bool strong = 32; - - repeated string property_name = 33; - - repeated string group_by_property_name = 34; - - optional bool distinct = 24; - - optional int64 min_safe_time_seconds = 35; - - repeated string safe_replica_name = 36; - - optional bool persist_offset = 37 [default=false]; -} - -message CompiledQuery { - required group PrimaryScan = 1 { - optional string index_name = 2; - - optional string start_key = 3; - optional bool start_inclusive = 4; - optional string end_key = 5; - optional bool end_inclusive = 6; - - repeated string start_postfix_value = 22; - repeated string end_postfix_value = 23; - - optional int64 end_unapplied_log_timestamp_us = 19; - } - - repeated group MergeJoinScan = 7 { - required string index_name = 8; - - repeated string prefix_value = 9; - - optional bool value_prefix = 20 [default=false]; - } - - optional Index index_def = 21; - - optional int32 offset = 10 [default = 0]; - - optional int32 limit = 11; - - required bool keys_only = 12; - - repeated string property_name = 24; - - optional int32 distinct_infix_size = 25; - - optional group EntityFilter = 13 { - optional bool distinct = 14 [default=false]; - - optional string kind = 17; - optional Reference ancestor = 18; - } -} - -message CompiledCursor { - optional group Position = 2 { - optional string start_key = 27; - - repeated group 
IndexValue = 29 { - optional string property = 30; - required PropertyValue value = 31; - } - - optional Reference key = 32; - - optional bool start_inclusive = 28 [default=true]; - } -} - -message Cursor { - required fixed64 cursor = 1; - - optional string app = 2; -} - -message Error { - enum ErrorCode { - BAD_REQUEST = 1; - CONCURRENT_TRANSACTION = 2; - INTERNAL_ERROR = 3; - NEED_INDEX = 4; - TIMEOUT = 5; - PERMISSION_DENIED = 6; - BIGTABLE_ERROR = 7; - COMMITTED_BUT_STILL_APPLYING = 8; - CAPABILITY_DISABLED = 9; - TRY_ALTERNATE_BACKEND = 10; - SAFE_TIME_TOO_OLD = 11; - } -} - -message Cost { - optional int32 index_writes = 1; - optional int32 index_write_bytes = 2; - optional int32 entity_writes = 3; - optional int32 entity_write_bytes = 4; - optional group CommitCost = 5 { - optional int32 requested_entity_puts = 6; - optional int32 requested_entity_deletes = 7; - }; - optional int32 approximate_storage_delta = 8; - optional int32 id_sequence_updates = 9; -} - -message GetRequest { - optional InternalHeader header = 6; - - repeated Reference key = 1; - optional Transaction transaction = 2; - - optional int64 failover_ms = 3; - - optional bool strong = 4; - - optional bool allow_deferred = 5 [default=false]; -} - -message GetResponse { - repeated group Entity = 1 { - optional EntityProto entity = 2; - optional Reference key = 4; - - optional int64 version = 3; - } - - repeated Reference deferred = 5; - - optional bool in_order = 6 [default=true]; -} - -message PutRequest { - optional InternalHeader header = 11; - - repeated EntityProto entity = 1; - optional Transaction transaction = 2; - repeated CompositeIndex composite_index = 3; - - optional bool trusted = 4 [default = false]; - - optional bool force = 7 [default = false]; - - optional bool mark_changes = 8 [default = false]; - repeated Snapshot snapshot = 9; - - enum AutoIdPolicy { - CURRENT = 0; - SEQUENTIAL = 1; - } - optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT]; -} - -message 
PutResponse { - repeated Reference key = 1; - optional Cost cost = 2; - repeated int64 version = 3; -} - -message TouchRequest { - optional InternalHeader header = 10; - - repeated Reference key = 1; - repeated CompositeIndex composite_index = 2; - optional bool force = 3 [default = false]; - repeated Snapshot snapshot = 9; -} - -message TouchResponse { - optional Cost cost = 1; -} - -message DeleteRequest { - optional InternalHeader header = 10; - - repeated Reference key = 6; - optional Transaction transaction = 5; - - optional bool trusted = 4 [default = false]; - - optional bool force = 7 [default = false]; - - optional bool mark_changes = 8 [default = false]; - repeated Snapshot snapshot = 9; -} - -message DeleteResponse { - optional Cost cost = 1; - repeated int64 version = 3; -} - -message NextRequest { - optional InternalHeader header = 5; - - required Cursor cursor = 1; - optional int32 count = 2; - - optional int32 offset = 4 [default = 0]; - - optional bool compile = 3 [default = false]; -} - -message QueryResult { - optional Cursor cursor = 1; - - repeated EntityProto result = 2; - - optional int32 skipped_results = 7; - - required bool more_results = 3; - - optional bool keys_only = 4; - - optional bool index_only = 9; - - optional bool small_ops = 10; - - optional CompiledQuery compiled_query = 5; - - optional CompiledCursor compiled_cursor = 6; - - repeated CompositeIndex index = 8; - - repeated int64 version = 11; -} - -message AllocateIdsRequest { - optional InternalHeader header = 4; - - optional Reference model_key = 1; - - optional int64 size = 2; - - optional int64 max = 3; - - repeated Reference reserve = 5; -} - -message AllocateIdsResponse { - required int64 start = 1; - required int64 end = 2; - optional Cost cost = 3; -} - -message CompositeIndices { - repeated CompositeIndex index = 1; -} - -message AddActionsRequest { - optional InternalHeader header = 3; - - required Transaction transaction = 1; - repeated Action action = 2; -} - 
-message AddActionsResponse { -} - -message BeginTransactionRequest { - optional InternalHeader header = 3; - - required string app = 1; - optional bool allow_multiple_eg = 2 [default = false]; -} - -message CommitResponse { - optional Cost cost = 1; - - repeated group Version = 3 { - required Reference root_entity_key = 4; - required int64 version = 5; - } -} diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go deleted file mode 100644 index d538701..0000000 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import netcontext "golang.org/x/net/context" - -// These functions are implementations of the wrapper functions -// in ../appengine/identity.go. See that file for commentary. - -func AppID(c netcontext.Context) string { - return appID(FullyQualifiedAppID(c)) -} diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go deleted file mode 100644 index e6b9227..0000000 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -// +build appengine - -package internal - -import ( - "appengine" - - netcontext "golang.org/x/net/context" -) - -func DefaultVersionHostname(ctx netcontext.Context) string { - return appengine.DefaultVersionHostname(fromContext(ctx)) -} - -func RequestID(ctx netcontext.Context) string { return appengine.RequestID(fromContext(ctx)) } -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func ModuleName(ctx netcontext.Context) string { return appengine.ModuleName(fromContext(ctx)) } -func VersionID(ctx netcontext.Context) string { return appengine.VersionID(fromContext(ctx)) } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } - -func fullyQualifiedAppID(ctx netcontext.Context) string { return fromContext(ctx).FullyQualifiedAppID() } diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go deleted file mode 100644 index ebe68b7..0000000 --- a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "net/http" - "os" - - netcontext "golang.org/x/net/context" -) - -// These functions are implementations of the wrapper functions -// in ../appengine/identity.go. See that file for commentary. 
- -const ( - hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname" - hRequestLogId = "X-AppEngine-Request-Log-Id" - hDatacenter = "X-AppEngine-Datacenter" -) - -func ctxHeaders(ctx netcontext.Context) http.Header { - return fromContext(ctx).Request().Header -} - -func DefaultVersionHostname(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hDefaultVersionHostname) -} - -func RequestID(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hRequestLogId) -} - -func Datacenter(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hDatacenter) -} - -func ServerSoftware() string { - // TODO(dsymonds): Remove fallback when we've verified this. - if s := os.Getenv("SERVER_SOFTWARE"); s != "" { - return s - } - return "Google App Engine/1.x.x" -} - -// TODO(dsymonds): Remove the metadata fetches. - -func ModuleName(_ netcontext.Context) string { - if s := os.Getenv("GAE_MODULE_NAME"); s != "" { - return s - } - return string(mustGetMetadata("instance/attributes/gae_backend_name")) -} - -func VersionID(_ netcontext.Context) string { - if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { - return s1 + "." + s2 - } - return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version")) -} - -func InstanceID() string { - if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" { - return s - } - return string(mustGetMetadata("instance/attributes/gae_backend_instance")) -} - -func partitionlessAppID() string { - // gae_project has everything except the partition prefix. 
- appID := os.Getenv("GAE_LONG_APP_ID") - if appID == "" { - appID = string(mustGetMetadata("instance/attributes/gae_project")) - } - return appID -} - -func fullyQualifiedAppID(_ netcontext.Context) string { - appID := partitionlessAppID() - - part := os.Getenv("GAE_PARTITION") - if part == "" { - part = string(mustGetMetadata("instance/attributes/gae_partition")) - } - - if part != "" { - appID = part + "~" + appID - } - return appID -} - -func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" -} diff --git a/vendor/google.golang.org/appengine/internal/image/images_service.pb.go b/vendor/google.golang.org/appengine/internal/image/images_service.pb.go deleted file mode 100644 index ba7c722..0000000 --- a/vendor/google.golang.org/appengine/internal/image/images_service.pb.go +++ /dev/null @@ -1,845 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/image/images_service.proto -// DO NOT EDIT! - -/* -Package image is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/image/images_service.proto - -It has these top-level messages: - ImagesServiceError - ImagesServiceTransform - Transform - ImageData - InputSettings - OutputSettings - ImagesTransformRequest - ImagesTransformResponse - CompositeImageOptions - ImagesCanvas - ImagesCompositeRequest - ImagesCompositeResponse - ImagesHistogramRequest - ImagesHistogram - ImagesHistogramResponse - ImagesGetUrlBaseRequest - ImagesGetUrlBaseResponse - ImagesDeleteUrlBaseRequest - ImagesDeleteUrlBaseResponse -*/ -package image - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type ImagesServiceError_ErrorCode int32 - -const ( - ImagesServiceError_UNSPECIFIED_ERROR ImagesServiceError_ErrorCode = 1 - ImagesServiceError_BAD_TRANSFORM_DATA ImagesServiceError_ErrorCode = 2 - ImagesServiceError_NOT_IMAGE ImagesServiceError_ErrorCode = 3 - ImagesServiceError_BAD_IMAGE_DATA ImagesServiceError_ErrorCode = 4 - ImagesServiceError_IMAGE_TOO_LARGE ImagesServiceError_ErrorCode = 5 - ImagesServiceError_INVALID_BLOB_KEY ImagesServiceError_ErrorCode = 6 - ImagesServiceError_ACCESS_DENIED ImagesServiceError_ErrorCode = 7 - ImagesServiceError_OBJECT_NOT_FOUND ImagesServiceError_ErrorCode = 8 -) - -var ImagesServiceError_ErrorCode_name = map[int32]string{ - 1: "UNSPECIFIED_ERROR", - 2: "BAD_TRANSFORM_DATA", - 3: "NOT_IMAGE", - 4: "BAD_IMAGE_DATA", - 5: "IMAGE_TOO_LARGE", - 6: "INVALID_BLOB_KEY", - 7: "ACCESS_DENIED", - 8: "OBJECT_NOT_FOUND", -} -var ImagesServiceError_ErrorCode_value = map[string]int32{ - "UNSPECIFIED_ERROR": 1, - "BAD_TRANSFORM_DATA": 2, - "NOT_IMAGE": 3, - "BAD_IMAGE_DATA": 4, - "IMAGE_TOO_LARGE": 5, - "INVALID_BLOB_KEY": 6, - "ACCESS_DENIED": 7, - "OBJECT_NOT_FOUND": 8, -} - -func (x ImagesServiceError_ErrorCode) Enum() *ImagesServiceError_ErrorCode { - p := new(ImagesServiceError_ErrorCode) - *p = x - return p -} -func (x ImagesServiceError_ErrorCode) String() string { - return proto.EnumName(ImagesServiceError_ErrorCode_name, int32(x)) -} -func (x *ImagesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ImagesServiceError_ErrorCode_value, data, "ImagesServiceError_ErrorCode") - if err != nil { - return err - } - *x = ImagesServiceError_ErrorCode(value) - return nil -} - -type ImagesServiceTransform_Type int32 - -const ( - ImagesServiceTransform_RESIZE ImagesServiceTransform_Type = 1 - ImagesServiceTransform_ROTATE ImagesServiceTransform_Type = 2 - ImagesServiceTransform_HORIZONTAL_FLIP ImagesServiceTransform_Type = 3 - 
ImagesServiceTransform_VERTICAL_FLIP ImagesServiceTransform_Type = 4 - ImagesServiceTransform_CROP ImagesServiceTransform_Type = 5 - ImagesServiceTransform_IM_FEELING_LUCKY ImagesServiceTransform_Type = 6 -) - -var ImagesServiceTransform_Type_name = map[int32]string{ - 1: "RESIZE", - 2: "ROTATE", - 3: "HORIZONTAL_FLIP", - 4: "VERTICAL_FLIP", - 5: "CROP", - 6: "IM_FEELING_LUCKY", -} -var ImagesServiceTransform_Type_value = map[string]int32{ - "RESIZE": 1, - "ROTATE": 2, - "HORIZONTAL_FLIP": 3, - "VERTICAL_FLIP": 4, - "CROP": 5, - "IM_FEELING_LUCKY": 6, -} - -func (x ImagesServiceTransform_Type) Enum() *ImagesServiceTransform_Type { - p := new(ImagesServiceTransform_Type) - *p = x - return p -} -func (x ImagesServiceTransform_Type) String() string { - return proto.EnumName(ImagesServiceTransform_Type_name, int32(x)) -} -func (x *ImagesServiceTransform_Type) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ImagesServiceTransform_Type_value, data, "ImagesServiceTransform_Type") - if err != nil { - return err - } - *x = ImagesServiceTransform_Type(value) - return nil -} - -type InputSettings_ORIENTATION_CORRECTION_TYPE int32 - -const ( - InputSettings_UNCHANGED_ORIENTATION InputSettings_ORIENTATION_CORRECTION_TYPE = 0 - InputSettings_CORRECT_ORIENTATION InputSettings_ORIENTATION_CORRECTION_TYPE = 1 -) - -var InputSettings_ORIENTATION_CORRECTION_TYPE_name = map[int32]string{ - 0: "UNCHANGED_ORIENTATION", - 1: "CORRECT_ORIENTATION", -} -var InputSettings_ORIENTATION_CORRECTION_TYPE_value = map[string]int32{ - "UNCHANGED_ORIENTATION": 0, - "CORRECT_ORIENTATION": 1, -} - -func (x InputSettings_ORIENTATION_CORRECTION_TYPE) Enum() *InputSettings_ORIENTATION_CORRECTION_TYPE { - p := new(InputSettings_ORIENTATION_CORRECTION_TYPE) - *p = x - return p -} -func (x InputSettings_ORIENTATION_CORRECTION_TYPE) String() string { - return proto.EnumName(InputSettings_ORIENTATION_CORRECTION_TYPE_name, int32(x)) -} -func (x 
*InputSettings_ORIENTATION_CORRECTION_TYPE) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(InputSettings_ORIENTATION_CORRECTION_TYPE_value, data, "InputSettings_ORIENTATION_CORRECTION_TYPE") - if err != nil { - return err - } - *x = InputSettings_ORIENTATION_CORRECTION_TYPE(value) - return nil -} - -type OutputSettings_MIME_TYPE int32 - -const ( - OutputSettings_PNG OutputSettings_MIME_TYPE = 0 - OutputSettings_JPEG OutputSettings_MIME_TYPE = 1 - OutputSettings_WEBP OutputSettings_MIME_TYPE = 2 -) - -var OutputSettings_MIME_TYPE_name = map[int32]string{ - 0: "PNG", - 1: "JPEG", - 2: "WEBP", -} -var OutputSettings_MIME_TYPE_value = map[string]int32{ - "PNG": 0, - "JPEG": 1, - "WEBP": 2, -} - -func (x OutputSettings_MIME_TYPE) Enum() *OutputSettings_MIME_TYPE { - p := new(OutputSettings_MIME_TYPE) - *p = x - return p -} -func (x OutputSettings_MIME_TYPE) String() string { - return proto.EnumName(OutputSettings_MIME_TYPE_name, int32(x)) -} -func (x *OutputSettings_MIME_TYPE) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(OutputSettings_MIME_TYPE_value, data, "OutputSettings_MIME_TYPE") - if err != nil { - return err - } - *x = OutputSettings_MIME_TYPE(value) - return nil -} - -type CompositeImageOptions_ANCHOR int32 - -const ( - CompositeImageOptions_TOP_LEFT CompositeImageOptions_ANCHOR = 0 - CompositeImageOptions_TOP CompositeImageOptions_ANCHOR = 1 - CompositeImageOptions_TOP_RIGHT CompositeImageOptions_ANCHOR = 2 - CompositeImageOptions_LEFT CompositeImageOptions_ANCHOR = 3 - CompositeImageOptions_CENTER CompositeImageOptions_ANCHOR = 4 - CompositeImageOptions_RIGHT CompositeImageOptions_ANCHOR = 5 - CompositeImageOptions_BOTTOM_LEFT CompositeImageOptions_ANCHOR = 6 - CompositeImageOptions_BOTTOM CompositeImageOptions_ANCHOR = 7 - CompositeImageOptions_BOTTOM_RIGHT CompositeImageOptions_ANCHOR = 8 -) - -var CompositeImageOptions_ANCHOR_name = map[int32]string{ - 0: "TOP_LEFT", - 1: "TOP", - 2: "TOP_RIGHT", - 
3: "LEFT", - 4: "CENTER", - 5: "RIGHT", - 6: "BOTTOM_LEFT", - 7: "BOTTOM", - 8: "BOTTOM_RIGHT", -} -var CompositeImageOptions_ANCHOR_value = map[string]int32{ - "TOP_LEFT": 0, - "TOP": 1, - "TOP_RIGHT": 2, - "LEFT": 3, - "CENTER": 4, - "RIGHT": 5, - "BOTTOM_LEFT": 6, - "BOTTOM": 7, - "BOTTOM_RIGHT": 8, -} - -func (x CompositeImageOptions_ANCHOR) Enum() *CompositeImageOptions_ANCHOR { - p := new(CompositeImageOptions_ANCHOR) - *p = x - return p -} -func (x CompositeImageOptions_ANCHOR) String() string { - return proto.EnumName(CompositeImageOptions_ANCHOR_name, int32(x)) -} -func (x *CompositeImageOptions_ANCHOR) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CompositeImageOptions_ANCHOR_value, data, "CompositeImageOptions_ANCHOR") - if err != nil { - return err - } - *x = CompositeImageOptions_ANCHOR(value) - return nil -} - -type ImagesServiceError struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *ImagesServiceError) Reset() { *m = ImagesServiceError{} } -func (m *ImagesServiceError) String() string { return proto.CompactTextString(m) } -func (*ImagesServiceError) ProtoMessage() {} - -type ImagesServiceTransform struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *ImagesServiceTransform) Reset() { *m = ImagesServiceTransform{} } -func (m *ImagesServiceTransform) String() string { return proto.CompactTextString(m) } -func (*ImagesServiceTransform) ProtoMessage() {} - -type Transform struct { - Width *int32 `protobuf:"varint,1,opt,name=width" json:"width,omitempty"` - Height *int32 `protobuf:"varint,2,opt,name=height" json:"height,omitempty"` - CropToFit *bool `protobuf:"varint,11,opt,name=crop_to_fit,def=0" json:"crop_to_fit,omitempty"` - CropOffsetX *float32 `protobuf:"fixed32,12,opt,name=crop_offset_x,def=0.5" json:"crop_offset_x,omitempty"` - CropOffsetY *float32 `protobuf:"fixed32,13,opt,name=crop_offset_y,def=0.5" json:"crop_offset_y,omitempty"` - Rotate *int32 `protobuf:"varint,3,opt,name=rotate,def=0" 
json:"rotate,omitempty"` - HorizontalFlip *bool `protobuf:"varint,4,opt,name=horizontal_flip,def=0" json:"horizontal_flip,omitempty"` - VerticalFlip *bool `protobuf:"varint,5,opt,name=vertical_flip,def=0" json:"vertical_flip,omitempty"` - CropLeftX *float32 `protobuf:"fixed32,6,opt,name=crop_left_x,def=0" json:"crop_left_x,omitempty"` - CropTopY *float32 `protobuf:"fixed32,7,opt,name=crop_top_y,def=0" json:"crop_top_y,omitempty"` - CropRightX *float32 `protobuf:"fixed32,8,opt,name=crop_right_x,def=1" json:"crop_right_x,omitempty"` - CropBottomY *float32 `protobuf:"fixed32,9,opt,name=crop_bottom_y,def=1" json:"crop_bottom_y,omitempty"` - Autolevels *bool `protobuf:"varint,10,opt,name=autolevels,def=0" json:"autolevels,omitempty"` - AllowStretch *bool `protobuf:"varint,14,opt,name=allow_stretch,def=0" json:"allow_stretch,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Transform) Reset() { *m = Transform{} } -func (m *Transform) String() string { return proto.CompactTextString(m) } -func (*Transform) ProtoMessage() {} - -const Default_Transform_CropToFit bool = false -const Default_Transform_CropOffsetX float32 = 0.5 -const Default_Transform_CropOffsetY float32 = 0.5 -const Default_Transform_Rotate int32 = 0 -const Default_Transform_HorizontalFlip bool = false -const Default_Transform_VerticalFlip bool = false -const Default_Transform_CropLeftX float32 = 0 -const Default_Transform_CropTopY float32 = 0 -const Default_Transform_CropRightX float32 = 1 -const Default_Transform_CropBottomY float32 = 1 -const Default_Transform_Autolevels bool = false -const Default_Transform_AllowStretch bool = false - -func (m *Transform) GetWidth() int32 { - if m != nil && m.Width != nil { - return *m.Width - } - return 0 -} - -func (m *Transform) GetHeight() int32 { - if m != nil && m.Height != nil { - return *m.Height - } - return 0 -} - -func (m *Transform) GetCropToFit() bool { - if m != nil && m.CropToFit != nil { - return *m.CropToFit - } - return 
Default_Transform_CropToFit -} - -func (m *Transform) GetCropOffsetX() float32 { - if m != nil && m.CropOffsetX != nil { - return *m.CropOffsetX - } - return Default_Transform_CropOffsetX -} - -func (m *Transform) GetCropOffsetY() float32 { - if m != nil && m.CropOffsetY != nil { - return *m.CropOffsetY - } - return Default_Transform_CropOffsetY -} - -func (m *Transform) GetRotate() int32 { - if m != nil && m.Rotate != nil { - return *m.Rotate - } - return Default_Transform_Rotate -} - -func (m *Transform) GetHorizontalFlip() bool { - if m != nil && m.HorizontalFlip != nil { - return *m.HorizontalFlip - } - return Default_Transform_HorizontalFlip -} - -func (m *Transform) GetVerticalFlip() bool { - if m != nil && m.VerticalFlip != nil { - return *m.VerticalFlip - } - return Default_Transform_VerticalFlip -} - -func (m *Transform) GetCropLeftX() float32 { - if m != nil && m.CropLeftX != nil { - return *m.CropLeftX - } - return Default_Transform_CropLeftX -} - -func (m *Transform) GetCropTopY() float32 { - if m != nil && m.CropTopY != nil { - return *m.CropTopY - } - return Default_Transform_CropTopY -} - -func (m *Transform) GetCropRightX() float32 { - if m != nil && m.CropRightX != nil { - return *m.CropRightX - } - return Default_Transform_CropRightX -} - -func (m *Transform) GetCropBottomY() float32 { - if m != nil && m.CropBottomY != nil { - return *m.CropBottomY - } - return Default_Transform_CropBottomY -} - -func (m *Transform) GetAutolevels() bool { - if m != nil && m.Autolevels != nil { - return *m.Autolevels - } - return Default_Transform_Autolevels -} - -func (m *Transform) GetAllowStretch() bool { - if m != nil && m.AllowStretch != nil { - return *m.AllowStretch - } - return Default_Transform_AllowStretch -} - -type ImageData struct { - Content []byte `protobuf:"bytes,1,req,name=content" json:"content,omitempty"` - BlobKey *string `protobuf:"bytes,2,opt,name=blob_key" json:"blob_key,omitempty"` - Width *int32 `protobuf:"varint,3,opt,name=width" 
json:"width,omitempty"` - Height *int32 `protobuf:"varint,4,opt,name=height" json:"height,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ImageData) Reset() { *m = ImageData{} } -func (m *ImageData) String() string { return proto.CompactTextString(m) } -func (*ImageData) ProtoMessage() {} - -func (m *ImageData) GetContent() []byte { - if m != nil { - return m.Content - } - return nil -} - -func (m *ImageData) GetBlobKey() string { - if m != nil && m.BlobKey != nil { - return *m.BlobKey - } - return "" -} - -func (m *ImageData) GetWidth() int32 { - if m != nil && m.Width != nil { - return *m.Width - } - return 0 -} - -func (m *ImageData) GetHeight() int32 { - if m != nil && m.Height != nil { - return *m.Height - } - return 0 -} - -type InputSettings struct { - CorrectExifOrientation *InputSettings_ORIENTATION_CORRECTION_TYPE `protobuf:"varint,1,opt,name=correct_exif_orientation,enum=appengine.InputSettings_ORIENTATION_CORRECTION_TYPE,def=0" json:"correct_exif_orientation,omitempty"` - ParseMetadata *bool `protobuf:"varint,2,opt,name=parse_metadata,def=0" json:"parse_metadata,omitempty"` - TransparentSubstitutionRgb *int32 `protobuf:"varint,3,opt,name=transparent_substitution_rgb" json:"transparent_substitution_rgb,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *InputSettings) Reset() { *m = InputSettings{} } -func (m *InputSettings) String() string { return proto.CompactTextString(m) } -func (*InputSettings) ProtoMessage() {} - -const Default_InputSettings_CorrectExifOrientation InputSettings_ORIENTATION_CORRECTION_TYPE = InputSettings_UNCHANGED_ORIENTATION -const Default_InputSettings_ParseMetadata bool = false - -func (m *InputSettings) GetCorrectExifOrientation() InputSettings_ORIENTATION_CORRECTION_TYPE { - if m != nil && m.CorrectExifOrientation != nil { - return *m.CorrectExifOrientation - } - return Default_InputSettings_CorrectExifOrientation -} - -func (m *InputSettings) GetParseMetadata() bool { - if m != nil && 
m.ParseMetadata != nil { - return *m.ParseMetadata - } - return Default_InputSettings_ParseMetadata -} - -func (m *InputSettings) GetTransparentSubstitutionRgb() int32 { - if m != nil && m.TransparentSubstitutionRgb != nil { - return *m.TransparentSubstitutionRgb - } - return 0 -} - -type OutputSettings struct { - MimeType *OutputSettings_MIME_TYPE `protobuf:"varint,1,opt,name=mime_type,enum=appengine.OutputSettings_MIME_TYPE,def=0" json:"mime_type,omitempty"` - Quality *int32 `protobuf:"varint,2,opt,name=quality" json:"quality,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OutputSettings) Reset() { *m = OutputSettings{} } -func (m *OutputSettings) String() string { return proto.CompactTextString(m) } -func (*OutputSettings) ProtoMessage() {} - -const Default_OutputSettings_MimeType OutputSettings_MIME_TYPE = OutputSettings_PNG - -func (m *OutputSettings) GetMimeType() OutputSettings_MIME_TYPE { - if m != nil && m.MimeType != nil { - return *m.MimeType - } - return Default_OutputSettings_MimeType -} - -func (m *OutputSettings) GetQuality() int32 { - if m != nil && m.Quality != nil { - return *m.Quality - } - return 0 -} - -type ImagesTransformRequest struct { - Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"` - Transform []*Transform `protobuf:"bytes,2,rep,name=transform" json:"transform,omitempty"` - Output *OutputSettings `protobuf:"bytes,3,req,name=output" json:"output,omitempty"` - Input *InputSettings `protobuf:"bytes,4,opt,name=input" json:"input,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ImagesTransformRequest) Reset() { *m = ImagesTransformRequest{} } -func (m *ImagesTransformRequest) String() string { return proto.CompactTextString(m) } -func (*ImagesTransformRequest) ProtoMessage() {} - -func (m *ImagesTransformRequest) GetImage() *ImageData { - if m != nil { - return m.Image - } - return nil -} - -func (m *ImagesTransformRequest) GetTransform() []*Transform { - if m != nil { - return 
m.Transform - } - return nil -} - -func (m *ImagesTransformRequest) GetOutput() *OutputSettings { - if m != nil { - return m.Output - } - return nil -} - -func (m *ImagesTransformRequest) GetInput() *InputSettings { - if m != nil { - return m.Input - } - return nil -} - -type ImagesTransformResponse struct { - Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"` - SourceMetadata *string `protobuf:"bytes,2,opt,name=source_metadata" json:"source_metadata,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ImagesTransformResponse) Reset() { *m = ImagesTransformResponse{} } -func (m *ImagesTransformResponse) String() string { return proto.CompactTextString(m) } -func (*ImagesTransformResponse) ProtoMessage() {} - -func (m *ImagesTransformResponse) GetImage() *ImageData { - if m != nil { - return m.Image - } - return nil -} - -func (m *ImagesTransformResponse) GetSourceMetadata() string { - if m != nil && m.SourceMetadata != nil { - return *m.SourceMetadata - } - return "" -} - -type CompositeImageOptions struct { - SourceIndex *int32 `protobuf:"varint,1,req,name=source_index" json:"source_index,omitempty"` - XOffset *int32 `protobuf:"varint,2,req,name=x_offset" json:"x_offset,omitempty"` - YOffset *int32 `protobuf:"varint,3,req,name=y_offset" json:"y_offset,omitempty"` - Opacity *float32 `protobuf:"fixed32,4,req,name=opacity" json:"opacity,omitempty"` - Anchor *CompositeImageOptions_ANCHOR `protobuf:"varint,5,req,name=anchor,enum=appengine.CompositeImageOptions_ANCHOR" json:"anchor,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompositeImageOptions) Reset() { *m = CompositeImageOptions{} } -func (m *CompositeImageOptions) String() string { return proto.CompactTextString(m) } -func (*CompositeImageOptions) ProtoMessage() {} - -func (m *CompositeImageOptions) GetSourceIndex() int32 { - if m != nil && m.SourceIndex != nil { - return *m.SourceIndex - } - return 0 -} - -func (m *CompositeImageOptions) GetXOffset() int32 { 
- if m != nil && m.XOffset != nil { - return *m.XOffset - } - return 0 -} - -func (m *CompositeImageOptions) GetYOffset() int32 { - if m != nil && m.YOffset != nil { - return *m.YOffset - } - return 0 -} - -func (m *CompositeImageOptions) GetOpacity() float32 { - if m != nil && m.Opacity != nil { - return *m.Opacity - } - return 0 -} - -func (m *CompositeImageOptions) GetAnchor() CompositeImageOptions_ANCHOR { - if m != nil && m.Anchor != nil { - return *m.Anchor - } - return CompositeImageOptions_TOP_LEFT -} - -type ImagesCanvas struct { - Width *int32 `protobuf:"varint,1,req,name=width" json:"width,omitempty"` - Height *int32 `protobuf:"varint,2,req,name=height" json:"height,omitempty"` - Output *OutputSettings `protobuf:"bytes,3,req,name=output" json:"output,omitempty"` - Color *int32 `protobuf:"varint,4,opt,name=color,def=-1" json:"color,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ImagesCanvas) Reset() { *m = ImagesCanvas{} } -func (m *ImagesCanvas) String() string { return proto.CompactTextString(m) } -func (*ImagesCanvas) ProtoMessage() {} - -const Default_ImagesCanvas_Color int32 = -1 - -func (m *ImagesCanvas) GetWidth() int32 { - if m != nil && m.Width != nil { - return *m.Width - } - return 0 -} - -func (m *ImagesCanvas) GetHeight() int32 { - if m != nil && m.Height != nil { - return *m.Height - } - return 0 -} - -func (m *ImagesCanvas) GetOutput() *OutputSettings { - if m != nil { - return m.Output - } - return nil -} - -func (m *ImagesCanvas) GetColor() int32 { - if m != nil && m.Color != nil { - return *m.Color - } - return Default_ImagesCanvas_Color -} - -type ImagesCompositeRequest struct { - Image []*ImageData `protobuf:"bytes,1,rep,name=image" json:"image,omitempty"` - Options []*CompositeImageOptions `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` - Canvas *ImagesCanvas `protobuf:"bytes,3,req,name=canvas" json:"canvas,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ImagesCompositeRequest) Reset() { 
*m = ImagesCompositeRequest{} } -func (m *ImagesCompositeRequest) String() string { return proto.CompactTextString(m) } -func (*ImagesCompositeRequest) ProtoMessage() {} - -func (m *ImagesCompositeRequest) GetImage() []*ImageData { - if m != nil { - return m.Image - } - return nil -} - -func (m *ImagesCompositeRequest) GetOptions() []*CompositeImageOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *ImagesCompositeRequest) GetCanvas() *ImagesCanvas { - if m != nil { - return m.Canvas - } - return nil -} - -type ImagesCompositeResponse struct { - Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ImagesCompositeResponse) Reset() { *m = ImagesCompositeResponse{} } -func (m *ImagesCompositeResponse) String() string { return proto.CompactTextString(m) } -func (*ImagesCompositeResponse) ProtoMessage() {} - -func (m *ImagesCompositeResponse) GetImage() *ImageData { - if m != nil { - return m.Image - } - return nil -} - -type ImagesHistogramRequest struct { - Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ImagesHistogramRequest) Reset() { *m = ImagesHistogramRequest{} } -func (m *ImagesHistogramRequest) String() string { return proto.CompactTextString(m) } -func (*ImagesHistogramRequest) ProtoMessage() {} - -func (m *ImagesHistogramRequest) GetImage() *ImageData { - if m != nil { - return m.Image - } - return nil -} - -type ImagesHistogram struct { - Red []int32 `protobuf:"varint,1,rep,name=red" json:"red,omitempty"` - Green []int32 `protobuf:"varint,2,rep,name=green" json:"green,omitempty"` - Blue []int32 `protobuf:"varint,3,rep,name=blue" json:"blue,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ImagesHistogram) Reset() { *m = ImagesHistogram{} } -func (m *ImagesHistogram) String() string { return proto.CompactTextString(m) } -func (*ImagesHistogram) ProtoMessage() {} - 
-func (m *ImagesHistogram) GetRed() []int32 { - if m != nil { - return m.Red - } - return nil -} - -func (m *ImagesHistogram) GetGreen() []int32 { - if m != nil { - return m.Green - } - return nil -} - -func (m *ImagesHistogram) GetBlue() []int32 { - if m != nil { - return m.Blue - } - return nil -} - -type ImagesHistogramResponse struct { - Histogram *ImagesHistogram `protobuf:"bytes,1,req,name=histogram" json:"histogram,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ImagesHistogramResponse) Reset() { *m = ImagesHistogramResponse{} } -func (m *ImagesHistogramResponse) String() string { return proto.CompactTextString(m) } -func (*ImagesHistogramResponse) ProtoMessage() {} - -func (m *ImagesHistogramResponse) GetHistogram() *ImagesHistogram { - if m != nil { - return m.Histogram - } - return nil -} - -type ImagesGetUrlBaseRequest struct { - BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` - CreateSecureUrl *bool `protobuf:"varint,2,opt,name=create_secure_url,def=0" json:"create_secure_url,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ImagesGetUrlBaseRequest) Reset() { *m = ImagesGetUrlBaseRequest{} } -func (m *ImagesGetUrlBaseRequest) String() string { return proto.CompactTextString(m) } -func (*ImagesGetUrlBaseRequest) ProtoMessage() {} - -const Default_ImagesGetUrlBaseRequest_CreateSecureUrl bool = false - -func (m *ImagesGetUrlBaseRequest) GetBlobKey() string { - if m != nil && m.BlobKey != nil { - return *m.BlobKey - } - return "" -} - -func (m *ImagesGetUrlBaseRequest) GetCreateSecureUrl() bool { - if m != nil && m.CreateSecureUrl != nil { - return *m.CreateSecureUrl - } - return Default_ImagesGetUrlBaseRequest_CreateSecureUrl -} - -type ImagesGetUrlBaseResponse struct { - Url *string `protobuf:"bytes,1,req,name=url" json:"url,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ImagesGetUrlBaseResponse) Reset() { *m = ImagesGetUrlBaseResponse{} } -func (m *ImagesGetUrlBaseResponse) 
String() string { return proto.CompactTextString(m) } -func (*ImagesGetUrlBaseResponse) ProtoMessage() {} - -func (m *ImagesGetUrlBaseResponse) GetUrl() string { - if m != nil && m.Url != nil { - return *m.Url - } - return "" -} - -type ImagesDeleteUrlBaseRequest struct { - BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ImagesDeleteUrlBaseRequest) Reset() { *m = ImagesDeleteUrlBaseRequest{} } -func (m *ImagesDeleteUrlBaseRequest) String() string { return proto.CompactTextString(m) } -func (*ImagesDeleteUrlBaseRequest) ProtoMessage() {} - -func (m *ImagesDeleteUrlBaseRequest) GetBlobKey() string { - if m != nil && m.BlobKey != nil { - return *m.BlobKey - } - return "" -} - -type ImagesDeleteUrlBaseResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *ImagesDeleteUrlBaseResponse) Reset() { *m = ImagesDeleteUrlBaseResponse{} } -func (m *ImagesDeleteUrlBaseResponse) String() string { return proto.CompactTextString(m) } -func (*ImagesDeleteUrlBaseResponse) ProtoMessage() {} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/image/images_service.proto b/vendor/google.golang.org/appengine/internal/image/images_service.proto deleted file mode 100644 index f0d2ed5..0000000 --- a/vendor/google.golang.org/appengine/internal/image/images_service.proto +++ /dev/null @@ -1,162 +0,0 @@ -syntax = "proto2"; -option go_package = "image"; - -package appengine; - -message ImagesServiceError { - enum ErrorCode { - UNSPECIFIED_ERROR = 1; - BAD_TRANSFORM_DATA = 2; - NOT_IMAGE = 3; - BAD_IMAGE_DATA = 4; - IMAGE_TOO_LARGE = 5; - INVALID_BLOB_KEY = 6; - ACCESS_DENIED = 7; - OBJECT_NOT_FOUND = 8; - } -} - -message ImagesServiceTransform { - enum Type { - RESIZE = 1; - ROTATE = 2; - HORIZONTAL_FLIP = 3; - VERTICAL_FLIP = 4; - CROP = 5; - IM_FEELING_LUCKY = 6; - } -} - -message Transform { - optional int32 width = 1; - optional int32 height = 2; - optional bool 
crop_to_fit = 11 [default = false]; - optional float crop_offset_x = 12 [default = 0.5]; - optional float crop_offset_y = 13 [default = 0.5]; - - optional int32 rotate = 3 [default = 0]; - - optional bool horizontal_flip = 4 [default = false]; - - optional bool vertical_flip = 5 [default = false]; - - optional float crop_left_x = 6 [default = 0.0]; - optional float crop_top_y = 7 [default = 0.0]; - optional float crop_right_x = 8 [default = 1.0]; - optional float crop_bottom_y = 9 [default = 1.0]; - - optional bool autolevels = 10 [default = false]; - - optional bool allow_stretch = 14 [default = false]; -} - -message ImageData { - required bytes content = 1 [ctype=CORD]; - optional string blob_key = 2; - - optional int32 width = 3; - optional int32 height = 4; -} - -message InputSettings { - enum ORIENTATION_CORRECTION_TYPE { - UNCHANGED_ORIENTATION = 0; - CORRECT_ORIENTATION = 1; - } - optional ORIENTATION_CORRECTION_TYPE correct_exif_orientation = 1 - [default=UNCHANGED_ORIENTATION]; - optional bool parse_metadata = 2 [default=false]; - optional int32 transparent_substitution_rgb = 3; -} - -message OutputSettings { - enum MIME_TYPE { - PNG = 0; - JPEG = 1; - WEBP = 2; - } - - optional MIME_TYPE mime_type = 1 [default=PNG]; - optional int32 quality = 2; -} - -message ImagesTransformRequest { - required ImageData image = 1; - repeated Transform transform = 2; - required OutputSettings output = 3; - optional InputSettings input = 4; -} - -message ImagesTransformResponse { - required ImageData image = 1; - optional string source_metadata = 2; -} - -message CompositeImageOptions { - required int32 source_index = 1; - required int32 x_offset = 2; - required int32 y_offset = 3; - required float opacity = 4; - - enum ANCHOR { - TOP_LEFT = 0; - TOP = 1; - TOP_RIGHT = 2; - LEFT = 3; - CENTER = 4; - RIGHT = 5; - BOTTOM_LEFT = 6; - BOTTOM = 7; - BOTTOM_RIGHT = 8; - } - - required ANCHOR anchor = 5; -} - -message ImagesCanvas { - required int32 width = 1; - required int32 
height = 2; - required OutputSettings output = 3; - optional int32 color = 4 [default=-1]; -} - -message ImagesCompositeRequest { - repeated ImageData image = 1; - repeated CompositeImageOptions options = 2; - required ImagesCanvas canvas = 3; -} - -message ImagesCompositeResponse { - required ImageData image = 1; -} - -message ImagesHistogramRequest { - required ImageData image = 1; -} - -message ImagesHistogram { - repeated int32 red = 1; - repeated int32 green = 2; - repeated int32 blue = 3; -} - -message ImagesHistogramResponse { - required ImagesHistogram histogram = 1; -} - -message ImagesGetUrlBaseRequest { - required string blob_key = 1; - - optional bool create_secure_url = 2 [default = false]; -} - -message ImagesGetUrlBaseResponse { - required string url = 1; -} - -message ImagesDeleteUrlBaseRequest { - required string blob_key = 1; -} - -message ImagesDeleteUrlBaseResponse { -} diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go deleted file mode 100644 index 66e8d76..0000000 --- a/vendor/google.golang.org/appengine/internal/internal.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package internal provides support for package appengine. -// -// Programs should not use this package directly. Its API is not stable. -// Use packages appengine and appengine/* instead. -package internal - -import ( - "fmt" - "io" - "log" - "net/http" - "net/url" - "os" - - "github.com/golang/protobuf/proto" - - remotepb "google.golang.org/appengine/internal/remote_api" -) - -// errorCodeMaps is a map of service name to the error code map for the service. -var errorCodeMaps = make(map[string]map[int32]string) - -// RegisterErrorCodeMap is called from API implementations to register their -// error code map. 
This should only be called from init functions. -func RegisterErrorCodeMap(service string, m map[int32]string) { - errorCodeMaps[service] = m -} - -type timeoutCodeKey struct { - service string - code int32 -} - -// timeoutCodes is the set of service+code pairs that represent timeouts. -var timeoutCodes = make(map[timeoutCodeKey]bool) - -func RegisterTimeoutErrorCode(service string, code int32) { - timeoutCodes[timeoutCodeKey{service, code}] = true -} - -// APIError is the type returned by appengine.Context's Call method -// when an API call fails in an API-specific way. This may be, for instance, -// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE. -type APIError struct { - Service string - Detail string - Code int32 // API-specific error code -} - -func (e *APIError) Error() string { - if e.Code == 0 { - if e.Detail == "" { - return "APIError