author      Wim <wim@42.be>              2022-04-01 00:23:19 +0200
committer   GitHub <noreply@github.com>  2022-04-01 00:23:19 +0200
commit      c6716e030c02f316b887c1d3ee4b443aa3ab6afd (patch)
tree        470461fe2d29662e7a69834ed21fce30beed65ab /vendor/github.com
parent      4ab72acec656dafd304f88359b509b1f27c06604 (diff)
download    matterbridge-msglm-c6716e030c02f316b887c1d3ee4b443aa3ab6afd.tar.gz
            matterbridge-msglm-c6716e030c02f316b887c1d3ee4b443aa3ab6afd.tar.bz2
            matterbridge-msglm-c6716e030c02f316b887c1d3ee4b443aa3ab6afd.zip
Update dependencies (#1784)
Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/.gitignore  5
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/.golangci.yml  35
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/CHANGELOG.md  10
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/CONTRIBUTING.md  13
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/LICENSE  24
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/README.md  169
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/decode/decode.go  13
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/errors/errors.go  59
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/errors/panic_handler.go  18
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/graphql.go  339
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/id.go  30
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/internal/common/blockstring.go  103
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/internal/common/directive.go  18
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/internal/common/lexer.go  229
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/internal/common/literals.go  58
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/internal/common/types.go  67
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/internal/common/values.go  37
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/internal/exec/exec.go  381
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/internal/exec/packer/packer.go  390
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/meta.go  70
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/resolvable.go  453
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/internal/exec/selected/selected.go  269
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/internal/exec/subscribe.go  179
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/internal/query/query.go  156
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/internal/schema/meta.go  203
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/internal/schema/schema.go  586
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/internal/validation/suggestion.go  71
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/internal/validation/validation.go  980
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/introspection.go  118
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/introspection/introspection.go  312
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/log/log.go  23
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/nullable_types.go  166
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/subscriptions.go  96
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/time.go  64
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/trace/trace.go  96
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/trace/validation_trace.go  25
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/types/argument.go  44
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/types/directive.go  34
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/types/doc.go  9
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/types/enum.go  32
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/types/extension.go  13
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/types/field.go  39
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/types/fragment.go  51
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/types/input.go  47
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/types/interface.go  25
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/types/object.go  25
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/types/query.go  62
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/types/scalar.go  22
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/types/schema.go  42
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/types/types.go  63
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/types/union.go  24
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/types/value.go  141
-rw-r--r--  vendor/github.com/graph-gophers/graphql-go/types/variable.go  15
-rw-r--r--  vendor/github.com/klauspost/cpuid/v2/README.md  4
-rw-r--r--  vendor/github.com/klauspost/cpuid/v2/cpuid.go  74
-rw-r--r--  vendor/github.com/klauspost/cpuid/v2/detect_arm64.go  3
-rw-r--r--  vendor/github.com/klauspost/cpuid/v2/detect_ref.go  3
-rw-r--r--  vendor/github.com/klauspost/cpuid/v2/detect_x86.go  3
-rw-r--r--  vendor/github.com/klauspost/cpuid/v2/featureid_string.go  197
-rw-r--r--  vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go  5
-rw-r--r--  vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go  3
-rw-r--r--  vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go  3
-rw-r--r--  vendor/github.com/labstack/echo/v4/CHANGELOG.md  24
-rw-r--r--  vendor/github.com/labstack/echo/v4/echo.go  12
-rw-r--r--  vendor/github.com/labstack/echo/v4/echo_fs_go1.16.go  40
-rw-r--r--  vendor/github.com/labstack/echo/v4/middleware/extractor.go  4
-rw-r--r--  vendor/github.com/labstack/echo/v4/middleware/recover.go  4
-rw-r--r--  vendor/github.com/labstack/echo/v4/middleware/timeout.go  130
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/bulk_export.go  13
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/channel.go  43
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/channel_member.go  25
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/channel_sidebar.go  42
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/client4.go  99
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/config.go  12
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/custom_status.go  8
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/emoji.go  5
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/feature_flags.go  27
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/github_release.go  26
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/group.go  47
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/job.go  26
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/license.go  6
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/migration.go  1
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/onboarding.go  25
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/permission.go  42
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/plugin_on_install_event.go  9
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/post.go  14
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/role.go  17
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/status.go  13
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/system.go  9
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/team.go  2
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/team_member.go  6
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/thread.go  59
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/upload_session.go  5
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/user.go  40
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/user_get.go  2
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/user_search.go  1
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/utils.go  18
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/version.go  3
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/model/websocket_message.go  5
-rw-r--r--  vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/graphql.go  23
-rw-r--r--  vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md  1
-rw-r--r--  vendor/github.com/minio/minio-go/v7/README.md  3
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-error-response.go  1
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-get-object-acl.go  5
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-get-object.go  4
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-get-options.go  1
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-put-object.go  15
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-remove.go  128
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go  2
-rw-r--r--  vendor/github.com/minio/minio-go/v7/api.go  75
-rw-r--r--  vendor/github.com/minio/minio-go/v7/functional_tests.go  152
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go  20
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go  96
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go (renamed from vendor/github.com/minio/minio-go/v7/pkg/credentials/signature-type.go)  0
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go  20
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go  21
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go (renamed from vendor/github.com/minio/minio-go/v7/pkg/credentials/sts-tls-identity.go)  20
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go  20
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go  17
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go  16
-rw-r--r--  vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go  10
-rw-r--r--  vendor/github.com/minio/minio-go/v7/transport.go  1
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/.gitignore  1
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/.travis.yml  20
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/CHANGELOG.md  63
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/LICENSE  201
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/Makefile  20
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/README.md  171
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/ext.go  24
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/ext/field.go  17
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/ext/tags.go  215
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/globaltracer.go  42
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/gocontext.go  65
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/log/field.go  282
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/log/util.go  61
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/noop.go  64
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/propagation.go  176
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/span.go  189
-rw-r--r--  vendor/github.com/opentracing/opentracing-go/tracer.go  304
139 files changed, 9901 insertions, 377 deletions
diff --git a/vendor/github.com/graph-gophers/graphql-go/.gitignore b/vendor/github.com/graph-gophers/graphql-go/.gitignore
new file mode 100644
index 00000000..2fa95abe
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/.gitignore
@@ -0,0 +1,5 @@
+/.idea
+/.vscode
+/internal/validation/testdata/graphql-js
+/internal/validation/testdata/node_modules
+/vendor
diff --git a/vendor/github.com/graph-gophers/graphql-go/.golangci.yml b/vendor/github.com/graph-gophers/graphql-go/.golangci.yml
new file mode 100644
index 00000000..c6741d58
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/.golangci.yml
@@ -0,0 +1,35 @@
+run:
+ timeout: 5m
+
+linters-settings:
+ gofmt:
+ simplify: true
+ govet:
+ check-shadowing: true
+ enable-all: true
+ disable:
+ - fieldalignment
+ - deepequalerrors # remove later
+
+linters:
+ disable-all: true
+ enable:
+ - deadcode
+ - gofmt
+ - gosimple
+ - govet
+ - ineffassign
+ - exportloopref
+ - structcheck
+ - staticcheck
+ - unconvert
+ - unused
+ - varcheck
+ - misspell
+ - goimports
+
+issues:
+ exclude-rules:
+ - linters:
+ - unused
+ path: "graphql_test.go"
\ No newline at end of file
diff --git a/vendor/github.com/graph-gophers/graphql-go/CHANGELOG.md b/vendor/github.com/graph-gophers/graphql-go/CHANGELOG.md
new file mode 100644
index 00000000..e5f48c06
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/CHANGELOG.md
@@ -0,0 +1,10 @@
+CHANGELOG
+
+[v1.1.0](https://github.com/graph-gophers/graphql-go/releases/tag/v1.1.0) Release v1.1.0
+* [FEATURE] Add types package #437
+* [FEATURE] Expose `packer.Unmarshaler` as `decode.Unmarshaler` to the public #450
+* [FEATURE] Add location fields to type definitions #454
+* [FEATURE] `errors.Errorf` preserves original error similar to `fmt.Errorf` #456
+* [BUGFIX] Fix duplicated __typename in response (fixes #369) #443
+
+[v1.0.0](https://github.com/graph-gophers/graphql-go/releases/tag/v1.0.0) Initial release
diff --git a/vendor/github.com/graph-gophers/graphql-go/CONTRIBUTING.md b/vendor/github.com/graph-gophers/graphql-go/CONTRIBUTING.md
new file mode 100644
index 00000000..a2cffca8
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/CONTRIBUTING.md
@@ -0,0 +1,13 @@
+## Contributing
+
+- With issues:
+ - Use the search tool before opening a new issue.
+ - Please provide source code and commit sha if you found a bug.
+ - Review existing issues and provide feedback or react to them.
+
+- With pull requests:
+ - Open your pull request against `master`
+ - Your pull request should have no more than two commits, if not you should squash them.
+ - It should pass all tests in the available continuous integrations systems such as TravisCI.
+ - You should add/modify tests to cover your proposed code changes.
+ - If your pull request contains a new feature, please document it on the README.
\ No newline at end of file
diff --git a/vendor/github.com/graph-gophers/graphql-go/LICENSE b/vendor/github.com/graph-gophers/graphql-go/LICENSE
new file mode 100644
index 00000000..3907ceca
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/LICENSE
@@ -0,0 +1,24 @@
+Copyright (c) 2016 Richard Musiol. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/graph-gophers/graphql-go/README.md b/vendor/github.com/graph-gophers/graphql-go/README.md
new file mode 100644
index 00000000..87b020c1
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/README.md
@@ -0,0 +1,169 @@
+# graphql-go [![Sourcegraph](https://sourcegraph.com/github.com/graph-gophers/graphql-go/-/badge.svg)](https://sourcegraph.com/github.com/graph-gophers/graphql-go?badge) [![Build Status](https://graph-gophers.semaphoreci.com/badges/graphql-go/branches/master.svg?style=shields)](https://graph-gophers.semaphoreci.com/projects/graphql-go) [![GoDoc](https://godoc.org/github.com/graph-gophers/graphql-go?status.svg)](https://godoc.org/github.com/graph-gophers/graphql-go)
+
+<p align="center"><img src="docs/img/logo.png" width="300"></p>
+
+The goal of this project is to provide full support of the [GraphQL draft specification](https://facebook.github.io/graphql/draft) with a set of idiomatic, easy to use Go packages.
+
+While still under heavy development (`internal` APIs are almost certainly subject to change), this library is
+safe for production use.
+
+## Features
+
+- minimal API
+- support for `context.Context`
+- support for the `OpenTracing` standard
+- schema type-checking against resolvers
+- resolvers are matched to the schema based on method sets (can resolve a GraphQL schema with a Go interface or Go struct).
+- handles panics in resolvers
+- parallel execution of resolvers
+- subscriptions
+ - [sample WS transport](https://github.com/graph-gophers/graphql-transport-ws)
+
+## Roadmap
+
+We're trying out the GitHub Project feature to manage `graphql-go`'s [development roadmap](https://github.com/graph-gophers/graphql-go/projects/1).
+Feedback is welcome and appreciated.
+
+## (Some) Documentation
+
+### Basic Sample
+
+```go
+package main
+
+import (
+ "log"
+ "net/http"
+
+ graphql "github.com/graph-gophers/graphql-go"
+ "github.com/graph-gophers/graphql-go/relay"
+)
+
+type query struct{}
+
+func (_ *query) Hello() string { return "Hello, world!" }
+
+func main() {
+ s := `
+ type Query {
+ hello: String!
+ }
+ `
+ schema := graphql.MustParseSchema(s, &query{})
+ http.Handle("/query", &relay.Handler{Schema: schema})
+ log.Fatal(http.ListenAndServe(":8080", nil))
+}
+```
+
+To test:
+
+```sh
+curl -XPOST -d '{"query": "{ hello }"}' localhost:8080/query
+```
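+
+Assuming the sample resolver above, the server should respond with something like `{"data":{"hello":"Hello, world!"}}`.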
+
+### Resolvers
+
+A resolver must have one method or field for each field of the GraphQL type it resolves. The method or field name has to be [exported](https://golang.org/ref/spec#Exported_identifiers) and match the schema field's name in a case-insensitive way.
+You can use struct fields as resolvers by using the `UseFieldResolvers()` schema option. For example,
+```go
+opts := []graphql.SchemaOpt{graphql.UseFieldResolvers()}
+schema := graphql.MustParseSchema(s, &query{}, opts...)
+```
+
+When using the `UseFieldResolvers` schema option, a struct field will be used *only* when:
+- there is no method for a struct field
+- a struct field does not implement an interface method
+- a struct field does not have arguments
+
+The method has up to two arguments:
+
+- Optional `context.Context` argument.
+- Mandatory `*struct { ... }` argument if the corresponding GraphQL field has arguments. The names of the struct fields have to be [exported](https://golang.org/ref/spec#Exported_identifiers) and have to match the names of the GraphQL arguments in a case-insensitive way.
+
+The method has up to two results:
+
+- The GraphQL field's value as determined by the resolver.
+- Optional `error` result.
+
+Example for a simple resolver method:
+
+```go
+func (r *helloWorldResolver) Hello() string {
+ return "Hello world!"
+}
+```
+
+The following signature is also allowed:
+
+```go
+func (r *helloWorldResolver) Hello(ctx context.Context) (string, error) {
+ return "Hello world!", nil
+}
+```
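+
+For a field that takes arguments, the resolver receives them via a struct whose exported fields match the argument names. A minimal sketch, assuming a hypothetical schema field `hello(name: String!)`:
+
+```go
+func (r *helloWorldResolver) Hello(args struct{ Name string }) string {
+	// args.Name maps to the GraphQL "name" argument (matched case-insensitively).
+	return "Hello, " + args.Name + "!"
+}
+```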
+
+### Schema Options
+
+- `UseStringDescriptions()` enables the usage of double quoted and triple quoted strings as descriptions. When this is not enabled, comments are parsed as descriptions instead.
+- `UseFieldResolvers()` specifies whether to use struct field resolvers.
+- `MaxDepth(n int)` specifies the maximum field nesting depth in a query. The default is 0 which disables max depth checking.
+- `MaxParallelism(n int)` specifies the maximum number of resolvers per request allowed to run in parallel. The default is 10.
+- `Tracer(tracer trace.Tracer)` is used to trace queries and fields. It defaults to `trace.OpenTracingTracer`.
+- `ValidationTracer(tracer trace.ValidationTracer)` is used to trace validation errors. It defaults to `trace.NoopValidationTracer`.
+- `Logger(logger log.Logger)` is used to log panics during query execution. It defaults to `exec.DefaultLogger`.
+- `PanicHandler(panicHandler errors.PanicHandler)` is used to transform panics into errors during query execution. It defaults to `errors.DefaultPanicHandler`.
+- `DisableIntrospection()` disables introspection queries.
+
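+For illustration, several of these options can be combined when parsing a schema (a sketch using the options listed above):
+
+```go
+opts := []graphql.SchemaOpt{
+	graphql.UseStringDescriptions(),
+	graphql.UseFieldResolvers(),
+	graphql.MaxDepth(10),
+}
+schema := graphql.MustParseSchema(s, &query{}, opts...)
+```
+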
+### Custom Errors
+
+Errors returned by resolvers can include custom extensions by implementing the `ResolverError` interface:
+
+```go
+type ResolverError interface {
+ error
+ Extensions() map[string]interface{}
+}
+```
+
+Example of a simple custom error:
+
+```go
+type droidNotFoundError struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+}
+
+func (e droidNotFoundError) Error() string {
+ return fmt.Sprintf("error [%s]: %s", e.Code, e.Message)
+}
+
+func (e droidNotFoundError) Extensions() map[string]interface{} {
+ return map[string]interface{}{
+ "code": e.Code,
+ "message": e.Message,
+ }
+}
+```
+
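+A resolver could then return this error directly. A sketch, assuming hypothetical `Resolver` and `droidResolver` types:
+
+```go
+func (r *Resolver) Droid(args struct{ ID graphql.ID }) (*droidResolver, error) {
+	// Resolver and droidResolver are hypothetical types used only for illustration.
+	return nil, droidNotFoundError{Code: "NotFound", Message: "This is not the droid you are looking for"}
+}
+```
+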
+This could produce a GraphQL error such as:
+
+```json
+{
+ "errors": [
+ {
+ "message": "error [NotFound]: This is not the droid you are looking for",
+ "path": [
+ "droid"
+ ],
+ "extensions": {
+ "code": "NotFound",
+ "message": "This is not the droid you are looking for"
+ }
+ }
+ ],
+ "data": null
+}
+```
+
+### [Examples](https://github.com/graph-gophers/graphql-go/wiki/Examples)
+
+### [Companies that use this library](https://github.com/graph-gophers/graphql-go/wiki/Users)
diff --git a/vendor/github.com/graph-gophers/graphql-go/decode/decode.go b/vendor/github.com/graph-gophers/graphql-go/decode/decode.go
new file mode 100644
index 00000000..56a9d5b5
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/decode/decode.go
@@ -0,0 +1,13 @@
+package decode
+
+// Unmarshaler defines the api of Go types mapped to custom GraphQL scalar types
+type Unmarshaler interface {
+ // ImplementsGraphQLType maps the implementing custom Go type
+ // to the GraphQL scalar type in the schema.
+ ImplementsGraphQLType(name string) bool
+ // UnmarshalGraphQL is the custom unmarshaler for the implementing type
+ //
+ // This function will be called whenever you use the
+ // custom GraphQL scalar type as an input
+ UnmarshalGraphQL(input interface{}) error
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/errors/errors.go b/vendor/github.com/graph-gophers/graphql-go/errors/errors.go
new file mode 100644
index 00000000..0f9340b1
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/errors/errors.go
@@ -0,0 +1,59 @@
+package errors
+
+import (
+ "fmt"
+)
+
+type QueryError struct {
+ Err error `json:"-"` // Err holds the underlying error, if available
+ Message string `json:"message"`
+ Locations []Location `json:"locations,omitempty"`
+ Path []interface{} `json:"path,omitempty"`
+ Rule string `json:"-"`
+ ResolverError error `json:"-"`
+ Extensions map[string]interface{} `json:"extensions,omitempty"`
+}
+
+type Location struct {
+ Line int `json:"line"`
+ Column int `json:"column"`
+}
+
+func (a Location) Before(b Location) bool {
+ return a.Line < b.Line || (a.Line == b.Line && a.Column < b.Column)
+}
+
+func Errorf(format string, a ...interface{}) *QueryError {
+ // similar to fmt.Errorf, Errorf will wrap the last argument if it is an instance of error
+ var err error
+ if n := len(a); n > 0 {
+ if v, ok := a[n-1].(error); ok {
+ err = v
+ }
+ }
+
+ return &QueryError{
+ Err: err,
+ Message: fmt.Sprintf(format, a...),
+ }
+}
+
+func (err *QueryError) Error() string {
+ if err == nil {
+ return "<nil>"
+ }
+ str := fmt.Sprintf("graphql: %s", err.Message)
+ for _, loc := range err.Locations {
+ str += fmt.Sprintf(" (line %d, column %d)", loc.Line, loc.Column)
+ }
+ return str
+}
+
+func (err *QueryError) Unwrap() error {
+ if err == nil {
+ return nil
+ }
+ return err.Err
+}
+
+var _ error = &QueryError{}
diff --git a/vendor/github.com/graph-gophers/graphql-go/errors/panic_handler.go b/vendor/github.com/graph-gophers/graphql-go/errors/panic_handler.go
new file mode 100644
index 00000000..5446c2a9
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/errors/panic_handler.go
@@ -0,0 +1,18 @@
+package errors
+
+import (
+ "context"
+)
+
+// PanicHandler is the interface used to create custom panic errors that occur during query execution
+type PanicHandler interface {
+ MakePanicError(ctx context.Context, value interface{}) *QueryError
+}
+
+// DefaultPanicHandler is the default PanicHandler
+type DefaultPanicHandler struct{}
+
+// MakePanicError creates a new QueryError from a panic that occurred during execution
+func (h *DefaultPanicHandler) MakePanicError(ctx context.Context, value interface{}) *QueryError {
+ return Errorf("panic occurred: %v", value)
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/graphql.go b/vendor/github.com/graph-gophers/graphql-go/graphql.go
new file mode 100644
index 00000000..76a6434d
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/graphql.go
@@ -0,0 +1,339 @@
+package graphql
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/graph-gophers/graphql-go/errors"
+ "github.com/graph-gophers/graphql-go/internal/common"
+ "github.com/graph-gophers/graphql-go/internal/exec"
+ "github.com/graph-gophers/graphql-go/internal/exec/resolvable"
+ "github.com/graph-gophers/graphql-go/internal/exec/selected"
+ "github.com/graph-gophers/graphql-go/internal/query"
+ "github.com/graph-gophers/graphql-go/internal/schema"
+ "github.com/graph-gophers/graphql-go/internal/validation"
+ "github.com/graph-gophers/graphql-go/introspection"
+ "github.com/graph-gophers/graphql-go/log"
+ "github.com/graph-gophers/graphql-go/trace"
+ "github.com/graph-gophers/graphql-go/types"
+)
+
+// ParseSchema parses a GraphQL schema and attaches the given root resolver. It returns an error if
+// the Go type signature of the resolvers does not match the schema. If nil is passed as the
+// resolver, then the schema can not be executed, but it may be inspected (e.g. with ToJSON).
+func ParseSchema(schemaString string, resolver interface{}, opts ...SchemaOpt) (*Schema, error) {
+ s := &Schema{
+ schema: schema.New(),
+ maxParallelism: 10,
+ tracer: trace.OpenTracingTracer{},
+ logger: &log.DefaultLogger{},
+ panicHandler: &errors.DefaultPanicHandler{},
+ }
+ for _, opt := range opts {
+ opt(s)
+ }
+
+ if s.validationTracer == nil {
+ if tracer, ok := s.tracer.(trace.ValidationTracerContext); ok {
+ s.validationTracer = tracer
+ } else {
+ s.validationTracer = &validationBridgingTracer{tracer: trace.NoopValidationTracer{}}
+ }
+ }
+
+ if err := schema.Parse(s.schema, schemaString, s.useStringDescriptions); err != nil {
+ return nil, err
+ }
+ if err := s.validateSchema(); err != nil {
+ return nil, err
+ }
+
+ r, err := resolvable.ApplyResolver(s.schema, resolver)
+ if err != nil {
+ return nil, err
+ }
+ s.res = r
+
+ return s, nil
+}
+
+// MustParseSchema calls ParseSchema and panics on error.
+func MustParseSchema(schemaString string, resolver interface{}, opts ...SchemaOpt) *Schema {
+ s, err := ParseSchema(schemaString, resolver, opts...)
+ if err != nil {
+ panic(err)
+ }
+ return s
+}
+
+// Schema represents a GraphQL schema with an optional resolver.
+type Schema struct {
+ schema *types.Schema
+ res *resolvable.Schema
+
+ maxDepth int
+ maxParallelism int
+ tracer trace.Tracer
+ validationTracer trace.ValidationTracerContext
+ logger log.Logger
+ panicHandler errors.PanicHandler
+ useStringDescriptions bool
+ disableIntrospection bool
+ subscribeResolverTimeout time.Duration
+}
+
+func (s *Schema) ASTSchema() *types.Schema {
+ return s.schema
+}
+
+// SchemaOpt is an option to pass to ParseSchema or MustParseSchema.
+type SchemaOpt func(*Schema)
+
+// UseStringDescriptions enables the usage of double quoted and triple quoted
+// strings as descriptions as per the June 2018 spec
+// https://facebook.github.io/graphql/June2018/. When this is not enabled,
+// comments are parsed as descriptions instead.
+func UseStringDescriptions() SchemaOpt {
+ return func(s *Schema) {
+ s.useStringDescriptions = true
+ }
+}
+
+// UseFieldResolvers specifies whether to use struct field resolvers
+func UseFieldResolvers() SchemaOpt {
+ return func(s *Schema) {
+ s.schema.UseFieldResolvers = true
+ }
+}
+
+// MaxDepth specifies the maximum field nesting depth in a query. The default is 0 which disables max depth checking.
+func MaxDepth(n int) SchemaOpt {
+ return func(s *Schema) {
+ s.maxDepth = n
+ }
+}
+
+// MaxParallelism specifies the maximum number of resolvers per request allowed to run in parallel. The default is 10.
+func MaxParallelism(n int) SchemaOpt {
+ return func(s *Schema) {
+ s.maxParallelism = n
+ }
+}
+
+// Tracer is used to trace queries and fields. It defaults to trace.OpenTracingTracer.
+func Tracer(tracer trace.Tracer) SchemaOpt {
+ return func(s *Schema) {
+ s.tracer = tracer
+ }
+}
+
+// ValidationTracer is used to trace validation errors. It defaults to trace.NoopValidationTracer.
+// Deprecated: context is needed to support tracing correctly. Use a Tracer which implements trace.ValidationTracerContext.
+func ValidationTracer(tracer trace.ValidationTracer) SchemaOpt { //nolint:staticcheck
+ return func(s *Schema) {
+ s.validationTracer = &validationBridgingTracer{tracer: tracer}
+ }
+}
+
+// Logger is used to log panics during query execution. It defaults to exec.DefaultLogger.
+func Logger(logger log.Logger) SchemaOpt {
+ return func(s *Schema) {
+ s.logger = logger
+ }
+}
+
+// PanicHandler is used to customize the panic errors during query execution.
+// It defaults to errors.DefaultPanicHandler.
+func PanicHandler(panicHandler errors.PanicHandler) SchemaOpt {
+ return func(s *Schema) {
+ s.panicHandler = panicHandler
+ }
+}
+
+// DisableIntrospection disables introspection queries.
+func DisableIntrospection() SchemaOpt {
+ return func(s *Schema) {
+ s.disableIntrospection = true
+ }
+}
+
+// SubscribeResolverTimeout is an option to control the amount of time
+// we allow for a single subscribe message resolver to complete its job
+// before it times out and returns an error to the subscriber.
+func SubscribeResolverTimeout(timeout time.Duration) SchemaOpt {
+ return func(s *Schema) {
+ s.subscribeResolverTimeout = timeout
+ }
+}
+
+// Response represents a typical response of a GraphQL server. It may be encoded to JSON directly or
+// it may be further processed to a custom response type, for example to include custom error data.
+// Errors are intentionally serialized first based on the advice in https://github.com/facebook/graphql/commit/7b40390d48680b15cb93e02d46ac5eb249689876#diff-757cea6edf0288677a9eea4cfc801d87R107
+type Response struct {
+ Errors []*errors.QueryError `json:"errors,omitempty"`
+ Data json.RawMessage `json:"data,omitempty"`
+ Extensions map[string]interface{} `json:"extensions,omitempty"`
+}
+
+// Validate validates the given query with the schema.
+func (s *Schema) Validate(queryString string) []*errors.QueryError {
+ return s.ValidateWithVariables(queryString, nil)
+}
+
+// ValidateWithVariables validates the given query with the schema and the input variables.
+func (s *Schema) ValidateWithVariables(queryString string, variables map[string]interface{}) []*errors.QueryError {
+ doc, qErr := query.Parse(queryString)
+ if qErr != nil {
+ return []*errors.QueryError{qErr}
+ }
+
+ return validation.Validate(s.schema, doc, variables, s.maxDepth)
+}
+
+// Exec executes the given query with the schema's resolver. It panics if the schema was created
+// without a resolver. If the context gets cancelled, no further resolvers will be called and
+// the context error will be returned as soon as possible (not immediately).
+func (s *Schema) Exec(ctx context.Context, queryString string, operationName string, variables map[string]interface{}) *Response {
+ if !s.res.Resolver.IsValid() {
+ panic("schema created without resolver, can not exec")
+ }
+ return s.exec(ctx, queryString, operationName, variables, s.res)
+}
+
+func (s *Schema) exec(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, res *resolvable.Schema) *Response {
+ doc, qErr := query.Parse(queryString)
+ if qErr != nil {
+ return &Response{Errors: []*errors.QueryError{qErr}}
+ }
+
+ validationFinish := s.validationTracer.TraceValidation(ctx)
+ errs := validation.Validate(s.schema, doc, variables, s.maxDepth)
+ validationFinish(errs)
+ if len(errs) != 0 {
+ return &Response{Errors: errs}
+ }
+
+ op, err := getOperation(doc, operationName)
+ if err != nil {
+ return &Response{Errors: []*errors.QueryError{errors.Errorf("%s", err)}}
+ }
+
+ // If the optional "operationName" POST parameter is not provided then
+ // use the query's operation name for improved tracing.
+ if operationName == "" {
+ operationName = op.Name.Name
+ }
+
+ // Subscriptions are not valid in Exec. Use schema.Subscribe() instead.
+ if op.Type == query.Subscription {
+ return &Response{Errors: []*errors.QueryError{{Message: "graphql-ws protocol header is missing"}}}
+ }
+ if op.Type == query.Mutation {
+ if _, ok := s.schema.EntryPoints["mutation"]; !ok {
+ return &Response{Errors: []*errors.QueryError{{Message: "no mutations are offered by the schema"}}}
+ }
+ }
+
+ // Fill in variables with the defaults from the operation
+ if variables == nil {
+ variables = make(map[string]interface{}, len(op.Vars))
+ }
+ for _, v := range op.Vars {
+ if _, ok := variables[v.Name.Name]; !ok && v.Default != nil {
+ variables[v.Name.Name] = v.Default.Deserialize(nil)
+ }
+ }
+
+ r := &exec.Request{
+ Request: selected.Request{
+ Doc: doc,
+ Vars: variables,
+ Schema: s.schema,
+ DisableIntrospection: s.disableIntrospection,
+ },
+ Limiter: make(chan struct{}, s.maxParallelism),
+ Tracer: s.tracer,
+ Logger: s.logger,
+ PanicHandler: s.panicHandler,
+ }
+ varTypes := make(map[string]*introspection.Type)
+ for _, v := range op.Vars {
+ t, err := common.ResolveType(v.Type, s.schema.Resolve)
+ if err != nil {
+ return &Response{Errors: []*errors.QueryError{err}}
+ }
+ varTypes[v.Name.Name] = introspection.WrapType(t)
+ }
+ traceCtx, finish := s.tracer.TraceQuery(ctx, queryString, operationName, variables, varTypes)
+ data, errs := r.Execute(traceCtx, res, op)
+ finish(errs)
+
+ return &Response{
+ Data: data,
+ Errors: errs,
+ }
+}
+
+func (s *Schema) validateSchema() error {
+ // https://graphql.github.io/graphql-spec/June2018/#sec-Root-Operation-Types
+ // > The query root operation type must be provided and must be an Object type.
+ if err := validateRootOp(s.schema, "query", true); err != nil {
+ return err
+ }
+ // > The mutation root operation type is optional; if it is not provided, the service does not support mutations.
+ // > If it is provided, it must be an Object type.
+ if err := validateRootOp(s.schema, "mutation", false); err != nil {
+ return err
+ }
+ // > Similarly, the subscription root operation type is also optional; if it is not provided, the service does not
+ // > support subscriptions. If it is provided, it must be an Object type.
+ if err := validateRootOp(s.schema, "subscription", false); err != nil {
+ return err
+ }
+ return nil
+}
+
+type validationBridgingTracer struct {
+ tracer trace.ValidationTracer //nolint:staticcheck
+}
+
+func (t *validationBridgingTracer) TraceValidation(context.Context) trace.TraceValidationFinishFunc {
+ return t.tracer.TraceValidation()
+}
+
+func validateRootOp(s *types.Schema, name string, mandatory bool) error {
+ t, ok := s.EntryPoints[name]
+ if !ok {
+ if mandatory {
+ return fmt.Errorf("root operation %q must be defined", name)
+ }
+ return nil
+ }
+ if t.Kind() != "OBJECT" {
+ return fmt.Errorf("root operation %q must be an OBJECT", name)
+ }
+ return nil
+}
+
+func getOperation(document *types.ExecutableDefinition, operationName string) (*types.OperationDefinition, error) {
+ if len(document.Operations) == 0 {
+ return nil, fmt.Errorf("no operations in query document")
+ }
+
+ if operationName == "" {
+ if len(document.Operations) > 1 {
+ return nil, fmt.Errorf("more than one operation in query document and no operation name given")
+ }
+ for _, op := range document.Operations {
+ return op, nil // return the one and only operation
+ }
+ }
+
+ op := document.Operations.Get(operationName)
+ if op == nil {
+ return nil, fmt.Errorf("no operation with name %q", operationName)
+ }
+ return op, nil
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/id.go b/vendor/github.com/graph-gophers/graphql-go/id.go
new file mode 100644
index 00000000..80bdac90
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/id.go
@@ -0,0 +1,30 @@
+package graphql
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// ID represents GraphQL's "ID" scalar type. A custom type may be used instead.
+type ID string
+
+func (ID) ImplementsGraphQLType(name string) bool {
+ return name == "ID"
+}
+
+func (id *ID) UnmarshalGraphQL(input interface{}) error {
+ var err error
+ switch input := input.(type) {
+ case string:
+ *id = ID(input)
+ case int32:
+ *id = ID(strconv.Itoa(int(input)))
+ default:
+ err = fmt.Errorf("wrong type for ID: %T", input)
+ }
+ return err
+}
+
+func (id ID) MarshalJSON() ([]byte, error) {
+ return strconv.AppendQuote(nil, string(id)), nil
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/blockstring.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/blockstring.go
new file mode 100644
index 00000000..1f7fe813
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/blockstring.go
@@ -0,0 +1,103 @@
+// MIT License
+//
+// Copyright (c) 2019 GraphQL Contributors
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+//
+// This implementation has been adapted from the graphql-js reference implementation
+// https://github.com/graphql/graphql-js/blob/5eb7c4ded7ceb83ac742149cbe0dae07a8af9a30/src/language/blockString.js
+// which is released under the MIT License above.
+
+package common
+
+import (
+ "strings"
+)
+
+// Produces the value of a block string from its parsed raw value, similar to
+// CoffeeScript's block string, Python's docstring trim or Ruby's strip_heredoc.
+//
+// This implements the GraphQL spec's BlockStringValue() static algorithm.
+func blockString(raw string) string {
+ lines := strings.Split(raw, "\n")
+
+ // Remove common indentation from all lines except the first (which has none)
+ ind := blockStringIndentation(lines)
+ if ind > 0 {
+ for i := 1; i < len(lines); i++ {
+ l := lines[i]
+ if len(l) < ind {
+ lines[i] = ""
+ continue
+ }
+ lines[i] = l[ind:]
+ }
+ }
+
+ // Remove leading and trailing blank lines
+ trimStart := 0
+ for i := 0; i < len(lines) && isBlank(lines[i]); i++ {
+ trimStart++
+ }
+ lines = lines[trimStart:]
+ trimEnd := 0
+ for i := len(lines) - 1; i > 0 && isBlank(lines[i]); i-- {
+ trimEnd++
+ }
+ lines = lines[:len(lines)-trimEnd]
+
+ return strings.Join(lines, "\n")
+}
+
+func blockStringIndentation(lines []string) int {
+ var commonIndent *int
+ for i := 1; i < len(lines); i++ {
+ l := lines[i]
+ indent := leadingWhitespace(l)
+ if indent == len(l) {
+ // don't consider blank/empty lines
+ continue
+ }
+ if indent == 0 {
+ return 0
+ }
+ if commonIndent == nil || indent < *commonIndent {
+ commonIndent = &indent
+ }
+ }
+ if commonIndent == nil {
+ return 0
+ }
+ return *commonIndent
+}
+
+func isBlank(s string) bool {
+ return len(s) == 0 || leadingWhitespace(s) == len(s)
+}
+
+func leadingWhitespace(s string) int {
+ i := 0
+ for _, r := range s {
+ if r != '\t' && r != ' ' {
+ break
+ }
+ i++
+ }
+ return i
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/directive.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/directive.go
new file mode 100644
index 00000000..f767e28f
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/directive.go
@@ -0,0 +1,18 @@
+package common
+
+import "github.com/graph-gophers/graphql-go/types"
+
+func ParseDirectives(l *Lexer) types.DirectiveList {
+ var directives types.DirectiveList
+ for l.Peek() == '@' {
+ l.ConsumeToken('@')
+ d := &types.Directive{}
+ d.Name = l.ConsumeIdentWithLoc()
+ d.Name.Loc.Column--
+ if l.Peek() == '(' {
+ d.Arguments = ParseArgumentList(l)
+ }
+ directives = append(directives, d)
+ }
+ return directives
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/lexer.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/lexer.go
new file mode 100644
index 00000000..ff45bcad
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/lexer.go
@@ -0,0 +1,229 @@
+package common
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+ "text/scanner"
+
+ "github.com/graph-gophers/graphql-go/errors"
+ "github.com/graph-gophers/graphql-go/types"
+)
+
+type syntaxError string
+
+type Lexer struct {
+ sc *scanner.Scanner
+ next rune
+ comment bytes.Buffer
+ useStringDescriptions bool
+}
+
+type Ident struct {
+ Name string
+ Loc errors.Location
+}
+
+func NewLexer(s string, useStringDescriptions bool) *Lexer {
+ sc := &scanner.Scanner{
+ Mode: scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanStrings,
+ }
+ sc.Init(strings.NewReader(s))
+
+ l := Lexer{sc: sc, useStringDescriptions: useStringDescriptions}
+ l.sc.Error = l.CatchScannerError
+
+ return &l
+}
+
+func (l *Lexer) CatchSyntaxError(f func()) (errRes *errors.QueryError) {
+ defer func() {
+ if err := recover(); err != nil {
+ if err, ok := err.(syntaxError); ok {
+ errRes = errors.Errorf("syntax error: %s", err)
+ errRes.Locations = []errors.Location{l.Location()}
+ return
+ }
+ panic(err)
+ }
+ }()
+
+ f()
+ return
+}
+
+func (l *Lexer) Peek() rune {
+ return l.next
+}
+
+// ConsumeWhitespace consumes whitespace and tokens equivalent to whitespace (e.g. commas and comments).
+//
+// Consumed comment characters will build the description for the next type or field encountered.
+// The description is available from `DescComment()`, and will be reset every time `ConsumeWhitespace()` is
+// executed unless l.useStringDescriptions is set.
+func (l *Lexer) ConsumeWhitespace() {
+ l.comment.Reset()
+ for {
+ l.next = l.sc.Scan()
+
+ if l.next == ',' {
+ // Similar to white space and line terminators, commas (',') are used to improve the
+ // legibility of source text and separate lexical tokens but are otherwise syntactically and
+ // semantically insignificant within GraphQL documents.
+ //
+ // http://facebook.github.io/graphql/draft/#sec-Insignificant-Commas
+ continue
+ }
+
+ if l.next == '#' {
+ // GraphQL source documents may contain single-line comments, starting with the '#' marker.
+ //
+ // A comment can contain any Unicode code point except `LineTerminator` so a comment always
+ // consists of all code points starting with the '#' character up to but not including the
+ // line terminator.
+ l.consumeComment()
+ continue
+ }
+
+ break
+ }
+}
+
+// consumeDescription optionally consumes a description based on the June 2018 graphql spec, if one is present.
+//
+// Single quote strings are also single line. Triple quote strings can be multi-line. Triple quote strings are
+// whitespace trimmed on both ends.
+// If a description is found, consume any following comments as well
+//
+// http://facebook.github.io/graphql/June2018/#sec-Descriptions
+func (l *Lexer) consumeDescription() string {
+ // If the next token is not a string, we don't consume it
+ if l.next != scanner.String {
+ return ""
+ }
+ // A triple quote string is an empty "string" followed by an open quote, due to the way the parser treats strings as one token
+ var desc string
+ if l.sc.Peek() == '"' {
+ desc = l.consumeTripleQuoteComment()
+ } else {
+ desc = l.consumeStringComment()
+ }
+ l.ConsumeWhitespace()
+ return desc
+}
+
+func (l *Lexer) ConsumeIdent() string {
+ name := l.sc.TokenText()
+ l.ConsumeToken(scanner.Ident)
+ return name
+}
+
+func (l *Lexer) ConsumeIdentWithLoc() types.Ident {
+ loc := l.Location()
+ name := l.sc.TokenText()
+ l.ConsumeToken(scanner.Ident)
+ return types.Ident{Name: name, Loc: loc}
+}
+
+func (l *Lexer) ConsumeKeyword(keyword string) {
+ if l.next != scanner.Ident || l.sc.TokenText() != keyword {
+ l.SyntaxError(fmt.Sprintf("unexpected %q, expecting %q", l.sc.TokenText(), keyword))
+ }
+ l.ConsumeWhitespace()
+}
+
+func (l *Lexer) ConsumeLiteral() *types.PrimitiveValue {
+ lit := &types.PrimitiveValue{Type: l.next, Text: l.sc.TokenText()}
+ l.ConsumeWhitespace()
+ return lit
+}
+
+func (l *Lexer) ConsumeToken(expected rune) {
+ if l.next != expected {
+ l.SyntaxError(fmt.Sprintf("unexpected %q, expecting %s", l.sc.TokenText(), scanner.TokenString(expected)))
+ }
+ l.ConsumeWhitespace()
+}
+
+func (l *Lexer) DescComment() string {
+ comment := l.comment.String()
+ desc := l.consumeDescription()
+ if l.useStringDescriptions {
+ return desc
+ }
+ return comment
+}
+
+func (l *Lexer) SyntaxError(message string) {
+ panic(syntaxError(message))
+}
+
+func (l *Lexer) Location() errors.Location {
+ return errors.Location{
+ Line: l.sc.Line,
+ Column: l.sc.Column,
+ }
+}
+
+func (l *Lexer) consumeTripleQuoteComment() string {
+ l.next = l.sc.Next()
+ if l.next != '"' {
+ panic("consumeTripleQuoteComment used in wrong context: no third quote?")
+ }
+
+ var buf bytes.Buffer
+ var numQuotes int
+ for {
+ l.next = l.sc.Next()
+ if l.next == '"' {
+ numQuotes++
+ } else {
+ numQuotes = 0
+ }
+ buf.WriteRune(l.next)
+ if numQuotes == 3 || l.next == scanner.EOF {
+ break
+ }
+ }
+ val := buf.String()
+ val = val[:len(val)-numQuotes]
+ return blockString(val)
+}
+
+func (l *Lexer) consumeStringComment() string {
+ val, err := strconv.Unquote(l.sc.TokenText())
+ if err != nil {
+ panic(err)
+ }
+ return val
+}
+
+// consumeComment consumes all characters from `#` to the first encountered line terminator.
+// The characters are appended to `l.comment`.
+func (l *Lexer) consumeComment() {
+ if l.next != '#' {
+ panic("consumeComment used in wrong context")
+ }
+
+ // TODO: count and trim whitespace so we can dedent any following lines.
+ if l.sc.Peek() == ' ' {
+ l.sc.Next()
+ }
+
+ if l.comment.Len() > 0 {
+ l.comment.WriteRune('\n')
+ }
+
+ for {
+ next := l.sc.Next()
+ if next == '\r' || next == '\n' || next == scanner.EOF {
+ break
+ }
+ l.comment.WriteRune(next)
+ }
+}
+
+func (l *Lexer) CatchScannerError(s *scanner.Scanner, msg string) {
+ l.SyntaxError(msg)
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/literals.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/literals.go
new file mode 100644
index 00000000..a6af3c43
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/literals.go
@@ -0,0 +1,58 @@
+package common
+
+import (
+ "text/scanner"
+
+ "github.com/graph-gophers/graphql-go/types"
+)
+
+func ParseLiteral(l *Lexer, constOnly bool) types.Value {
+ loc := l.Location()
+ switch l.Peek() {
+ case '$':
+ if constOnly {
+ l.SyntaxError("variable not allowed")
+ panic("unreachable")
+ }
+ l.ConsumeToken('$')
+ return &types.Variable{Name: l.ConsumeIdent(), Loc: loc}
+
+ case scanner.Int, scanner.Float, scanner.String, scanner.Ident:
+ lit := l.ConsumeLiteral()
+ if lit.Type == scanner.Ident && lit.Text == "null" {
+ return &types.NullValue{Loc: loc}
+ }
+ lit.Loc = loc
+ return lit
+ case '-':
+ l.ConsumeToken('-')
+ lit := l.ConsumeLiteral()
+ lit.Text = "-" + lit.Text
+ lit.Loc = loc
+ return lit
+ case '[':
+ l.ConsumeToken('[')
+ var list []types.Value
+ for l.Peek() != ']' {
+ list = append(list, ParseLiteral(l, constOnly))
+ }
+ l.ConsumeToken(']')
+ return &types.ListValue{Values: list, Loc: loc}
+
+ case '{':
+ l.ConsumeToken('{')
+ var fields []*types.ObjectField
+ for l.Peek() != '}' {
+ name := l.ConsumeIdentWithLoc()
+ l.ConsumeToken(':')
+ value := ParseLiteral(l, constOnly)
+ fields = append(fields, &types.ObjectField{Name: name, Value: value})
+ }
+ l.ConsumeToken('}')
+ return &types.ObjectValue{Fields: fields, Loc: loc}
+
+ default:
+ l.SyntaxError("invalid value")
+ panic("unreachable")
+ }
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/types.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/types.go
new file mode 100644
index 00000000..4a30f46e
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/types.go
@@ -0,0 +1,67 @@
+package common
+
+import (
+ "github.com/graph-gophers/graphql-go/errors"
+ "github.com/graph-gophers/graphql-go/types"
+)
+
+func ParseType(l *Lexer) types.Type {
+ t := parseNullType(l)
+ if l.Peek() == '!' {
+ l.ConsumeToken('!')
+ return &types.NonNull{OfType: t}
+ }
+ return t
+}
+
+func parseNullType(l *Lexer) types.Type {
+ if l.Peek() == '[' {
+ l.ConsumeToken('[')
+ ofType := ParseType(l)
+ l.ConsumeToken(']')
+ return &types.List{OfType: ofType}
+ }
+
+ return &types.TypeName{Ident: l.ConsumeIdentWithLoc()}
+}
+
+type Resolver func(name string) types.Type
+
+// ResolveType attempts to resolve a type's name against a resolving function.
+// This function is used when one needs to check if a TypeName exists in the resolver (typically a Schema).
+//
+// In the example below, ResolveType would be used to check if the resolving function
+// returns a valid type for Dimension:
+//
+// type Profile {
+// picture(dimensions: Dimension): Url
+// }
+//
+// ResolveType recursively unwraps List and NonNull types until a NamedType is reached.
+func ResolveType(t types.Type, resolver Resolver) (types.Type, *errors.QueryError) {
+ switch t := t.(type) {
+ case *types.List:
+ ofType, err := ResolveType(t.OfType, resolver)
+ if err != nil {
+ return nil, err
+ }
+ return &types.List{OfType: ofType}, nil
+ case *types.NonNull:
+ ofType, err := ResolveType(t.OfType, resolver)
+ if err != nil {
+ return nil, err
+ }
+ return &types.NonNull{OfType: ofType}, nil
+ case *types.TypeName:
+ refT := resolver(t.Name)
+ if refT == nil {
+ err := errors.Errorf("Unknown type %q.", t.Name)
+ err.Rule = "KnownTypeNames"
+ err.Locations = []errors.Location{t.Loc}
+ return nil, err
+ }
+ return refT, nil
+ default:
+ return t, nil
+ }
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/values.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/values.go
new file mode 100644
index 00000000..2d6e0b54
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/values.go
@@ -0,0 +1,37 @@
+package common
+
+import (
+ "github.com/graph-gophers/graphql-go/types"
+)
+
+func ParseInputValue(l *Lexer) *types.InputValueDefinition {
+ p := &types.InputValueDefinition{}
+ p.Loc = l.Location()
+ p.Desc = l.DescComment()
+ p.Name = l.ConsumeIdentWithLoc()
+ l.ConsumeToken(':')
+ p.TypeLoc = l.Location()
+ p.Type = ParseType(l)
+ if l.Peek() == '=' {
+ l.ConsumeToken('=')
+ p.Default = ParseLiteral(l, true)
+ }
+ p.Directives = ParseDirectives(l)
+ return p
+}
+
+func ParseArgumentList(l *Lexer) types.ArgumentList {
+ var args types.ArgumentList
+ l.ConsumeToken('(')
+ for l.Peek() != ')' {
+ name := l.ConsumeIdentWithLoc()
+ l.ConsumeToken(':')
+ value := ParseLiteral(l, false)
+ args = append(args, &types.Argument{
+ Name: name,
+ Value: value,
+ })
+ }
+ l.ConsumeToken(')')
+ return args
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/exec.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/exec.go
new file mode 100644
index 00000000..6b478487
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/exec.go
@@ -0,0 +1,381 @@
+package exec
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "sync"
+ "time"
+
+ "github.com/graph-gophers/graphql-go/errors"
+ "github.com/graph-gophers/graphql-go/internal/exec/resolvable"
+ "github.com/graph-gophers/graphql-go/internal/exec/selected"
+ "github.com/graph-gophers/graphql-go/internal/query"
+ "github.com/graph-gophers/graphql-go/log"
+ "github.com/graph-gophers/graphql-go/trace"
+ "github.com/graph-gophers/graphql-go/types"
+)
+
+type Request struct {
+ selected.Request
+ Limiter chan struct{}
+ Tracer trace.Tracer
+ Logger log.Logger
+ PanicHandler errors.PanicHandler
+ SubscribeResolverTimeout time.Duration
+}
+
+func (r *Request) handlePanic(ctx context.Context) {
+ if value := recover(); value != nil {
+ r.Logger.LogPanic(ctx, value)
+ r.AddError(r.PanicHandler.MakePanicError(ctx, value))
+ }
+}
+
+type extensionser interface {
+ Extensions() map[string]interface{}
+}
+
+func (r *Request) Execute(ctx context.Context, s *resolvable.Schema, op *types.OperationDefinition) ([]byte, []*errors.QueryError) {
+ var out bytes.Buffer
+ func() {
+ defer r.handlePanic(ctx)
+ sels := selected.ApplyOperation(&r.Request, s, op)
+ r.execSelections(ctx, sels, nil, s, s.Resolver, &out, op.Type == query.Mutation)
+ }()
+
+ if err := ctx.Err(); err != nil {
+ return nil, []*errors.QueryError{errors.Errorf("%s", err)}
+ }
+
+ return out.Bytes(), r.Errs
+}
+
+type fieldToExec struct {
+ field *selected.SchemaField
+ sels []selected.Selection
+ resolver reflect.Value
+ out *bytes.Buffer
+}
+
+func resolvedToNull(b *bytes.Buffer) bool {
+ return bytes.Equal(b.Bytes(), []byte("null"))
+}
+
+func (r *Request) execSelections(ctx context.Context, sels []selected.Selection, path *pathSegment, s *resolvable.Schema, resolver reflect.Value, out *bytes.Buffer, serially bool) {
+ async := !serially && selected.HasAsyncSel(sels)
+
+ var fields []*fieldToExec
+ collectFieldsToResolve(sels, s, resolver, &fields, make(map[string]*fieldToExec))
+
+ if async {
+ var wg sync.WaitGroup
+ wg.Add(len(fields))
+ for _, f := range fields {
+ go func(f *fieldToExec) {
+ defer wg.Done()
+ defer r.handlePanic(ctx)
+ f.out = new(bytes.Buffer)
+ execFieldSelection(ctx, r, s, f, &pathSegment{path, f.field.Alias}, true)
+ }(f)
+ }
+ wg.Wait()
+ } else {
+ for _, f := range fields {
+ f.out = new(bytes.Buffer)
+ execFieldSelection(ctx, r, s, f, &pathSegment{path, f.field.Alias}, true)
+ }
+ }
+
+ out.WriteByte('{')
+ for i, f := range fields {
+ // If a non-nullable child resolved to null, an error was added to the
+ // "errors" list in the response, so this field resolves to null.
+ // If this field is non-nullable, the error is propagated to its parent.
+ if _, ok := f.field.Type.(*types.NonNull); ok && resolvedToNull(f.out) {
+ out.Reset()
+ out.Write([]byte("null"))
+ return
+ }
+
+ if i > 0 {
+ out.WriteByte(',')
+ }
+ out.WriteByte('"')
+ out.WriteString(f.field.Alias)
+ out.WriteByte('"')
+ out.WriteByte(':')
+ out.Write(f.out.Bytes())
+ }
+ out.WriteByte('}')
+}
+
+func collectFieldsToResolve(sels []selected.Selection, s *resolvable.Schema, resolver reflect.Value, fields *[]*fieldToExec, fieldByAlias map[string]*fieldToExec) {
+ for _, sel := range sels {
+ switch sel := sel.(type) {
+ case *selected.SchemaField:
+ field, ok := fieldByAlias[sel.Alias]
+ if !ok { // validation already checked for conflict (TODO)
+ field = &fieldToExec{field: sel, resolver: resolver}
+ fieldByAlias[sel.Alias] = field
+ *fields = append(*fields, field)
+ }
+ field.sels = append(field.sels, sel.Sels...)
+
+ case *selected.TypenameField:
+ _, ok := fieldByAlias[sel.Alias]
+ if !ok {
+ res := reflect.ValueOf(typeOf(sel, resolver))
+ f := s.FieldTypename
+ f.TypeName = res.String()
+
+ sf := &selected.SchemaField{
+ Field: f,
+ Alias: sel.Alias,
+ FixedResult: res,
+ }
+
+ field := &fieldToExec{field: sf, resolver: resolver}
+ *fields = append(*fields, field)
+ fieldByAlias[sel.Alias] = field
+ }
+
+ case *selected.TypeAssertion:
+ out := resolver.Method(sel.MethodIndex).Call(nil)
+ if !out[1].Bool() {
+ continue
+ }
+ collectFieldsToResolve(sel.Sels, s, out[0], fields, fieldByAlias)
+
+ default:
+ panic("unreachable")
+ }
+ }
+}
+
+func typeOf(tf *selected.TypenameField, resolver reflect.Value) string {
+ if len(tf.TypeAssertions) == 0 {
+ return tf.Name
+ }
+ for name, a := range tf.TypeAssertions {
+ out := resolver.Method(a.MethodIndex).Call(nil)
+ if out[1].Bool() {
+ return name
+ }
+ }
+ return ""
+}
+
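+// execFieldSelection resolves a single field: it takes a slot from the
+// request limiter (when applyLimiter is set), opens a tracing span, invokes
+// either the resolver method (passing context and packed arguments as needed)
+// or reads the struct field, and then serializes the result into f.out.
+// Resolver errors and panics are added to the request's error list and the
+// field is rendered as null.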
+func execFieldSelection(ctx context.Context, r *Request, s *resolvable.Schema, f *fieldToExec, path *pathSegment, applyLimiter bool) {
+ if applyLimiter {
+ r.Limiter <- struct{}{}
+ }
+
+ var result reflect.Value
+ var err *errors.QueryError
+
+ traceCtx, finish := r.Tracer.TraceField(ctx, f.field.TraceLabel, f.field.TypeName, f.field.Name, !f.field.Async, f.field.Args)
+ defer func() {
+ finish(err)
+ }()
+
+ err = func() (err *errors.QueryError) {
+ defer func() {
+ if panicValue := recover(); panicValue != nil {
+ r.Logger.LogPanic(ctx, panicValue)
+ err = r.PanicHandler.MakePanicError(ctx, panicValue)
+ err.Path = path.toSlice()
+ }
+ }()
+
+ if f.field.FixedResult.IsValid() {
+ result = f.field.FixedResult
+ return nil
+ }
+
+ if err := traceCtx.Err(); err != nil {
+ return errors.Errorf("%s", err) // don't execute any more resolvers if context got cancelled
+ }
+
+ res := f.resolver
+ if f.field.UseMethodResolver() {
+ var in []reflect.Value
+ if f.field.HasContext {
+ in = append(in, reflect.ValueOf(traceCtx))
+ }
+ if f.field.ArgsPacker != nil {
+ in = append(in, f.field.PackedArgs)
+ }
+ callOut := res.Method(f.field.MethodIndex).Call(in)
+ result = callOut[0]
+ if f.field.HasError && !callOut[1].IsNil() {
+ resolverErr := callOut[1].Interface().(error)
+ err := errors.Errorf("%s", resolverErr)
+ err.Path = path.toSlice()
+ err.ResolverError = resolverErr
+ if ex, ok := callOut[1].Interface().(extensionser); ok {
+ err.Extensions = ex.Extensions()
+ }
+ return err
+ }
+ } else {
+ // TODO extract out unwrapping ptr logic to a common place
+ if res.Kind() == reflect.Ptr {
+ res = res.Elem()
+ }
+ result = res.FieldByIndex(f.field.FieldIndex)
+ }
+ return nil
+ }()
+
+ if applyLimiter {
+ <-r.Limiter
+ }
+
+ if err != nil {
+ // If an error occurred while resolving a field, it should be treated as though the field
+ // returned null, and an error must be added to the "errors" list in the response.
+ r.AddError(err)
+ f.out.WriteString("null")
+ return
+ }
+
+ r.execSelectionSet(traceCtx, f.sels, f.field.Type, path, s, result, f.out)
+}
+
+func (r *Request) execSelectionSet(ctx context.Context, sels []selected.Selection, typ types.Type, path *pathSegment, s *resolvable.Schema, resolver reflect.Value, out *bytes.Buffer) {
+ t, nonNull := unwrapNonNull(typ)
+
+ // a reflect.Value of a nil interface will show up as an Invalid value
+ if resolver.Kind() == reflect.Invalid || ((resolver.Kind() == reflect.Ptr || resolver.Kind() == reflect.Interface) && resolver.IsNil()) {
+ // If a field of a non-null type resolves to null (either because the
+ // function to resolve the field returned null or because an error occurred),
+ // add an error to the "errors" list in the response.
+ if nonNull {
+ err := errors.Errorf("graphql: got nil for non-null %q", t)
+ err.Path = path.toSlice()
+ r.AddError(err)
+ }
+ out.WriteString("null")
+ return
+ }
+
+ switch t.(type) {
+ case *types.ObjectTypeDefinition, *types.InterfaceTypeDefinition, *types.Union:
+ r.execSelections(ctx, sels, path, s, resolver, out, false)
+ return
+ }
+
+ // Any pointers or interfaces at this point should be non-nil, so we can safely dereference them to
+ // obtain the underlying value for serialization.
+ if resolver.Kind() == reflect.Ptr || resolver.Kind() == reflect.Interface {
+ resolver = resolver.Elem()
+ }
+
+ switch t := t.(type) {
+ case *types.List:
+ r.execList(ctx, sels, t, path, s, resolver, out)
+
+ case *types.ScalarTypeDefinition:
+ v := resolver.Interface()
+ data, err := json.Marshal(v)
+ if err != nil {
+ panic(errors.Errorf("could not marshal %v: %s", v, err))
+ }
+ out.Write(data)
+
+ case *types.EnumTypeDefinition:
+ var stringer fmt.Stringer = resolver
+ if s, ok := resolver.Interface().(fmt.Stringer); ok {
+ stringer = s
+ }
+ name := stringer.String()
+ var valid bool
+ for _, v := range t.EnumValuesDefinition {
+ if v.EnumValue == name {
+ valid = true
+ break
+ }
+ }
+ if !valid {
+ err := errors.Errorf("Invalid value %s.\nExpected type %s, found %s.", name, t.Name, name)
+ err.Path = path.toSlice()
+ r.AddError(err)
+ out.WriteString("null")
+ return
+ }
+ out.WriteByte('"')
+ out.WriteString(name)
+ out.WriteByte('"')
+
+ default:
+ panic("unreachable")
+ }
+}
+
+func (r *Request) execList(ctx context.Context, sels []selected.Selection, typ *types.List, path *pathSegment, s *resolvable.Schema, resolver reflect.Value, out *bytes.Buffer) {
+ l := resolver.Len()
+ entryouts := make([]bytes.Buffer, l)
+
+ if selected.HasAsyncSel(sels) {
+ // Limit the number of concurrent goroutines spawned as it can lead to large
+ // memory spikes for large lists.
+ concurrency := cap(r.Limiter)
+ sem := make(chan struct{}, concurrency)
+ for i := 0; i < l; i++ {
+ sem <- struct{}{}
+ go func(i int) {
+ defer func() { <-sem }()
+ defer r.handlePanic(ctx)
+ r.execSelectionSet(ctx, sels, typ.OfType, &pathSegment{path, i}, s, resolver.Index(i), &entryouts[i])
+ }(i)
+ }
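+ // Filling the semaphore to capacity blocks until every goroutine above
+ // has released its slot, i.e. all list entries have been resolved.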
+ for i := 0; i < concurrency; i++ {
+ sem <- struct{}{}
+ }
+ } else {
+ for i := 0; i < l; i++ {
+ r.execSelectionSet(ctx, sels, typ.OfType, &pathSegment{path, i}, s, resolver.Index(i), &entryouts[i])
+ }
+ }
+
+ _, listOfNonNull := typ.OfType.(*types.NonNull)
+
+ out.WriteByte('[')
+ for i, entryout := range entryouts {
+ // If the list wraps a non-null type and one of the list elements
+ // resolves to null, then the entire list resolves to null.
+ if listOfNonNull && resolvedToNull(&entryout) {
+ out.Reset()
+ out.WriteString("null")
+ return
+ }
+
+ if i > 0 {
+ out.WriteByte(',')
+ }
+ out.Write(entryout.Bytes())
+ }
+ out.WriteByte(']')
+}
+
+func unwrapNonNull(t types.Type) (types.Type, bool) {
+ if nn, ok := t.(*types.NonNull); ok {
+ return nn.OfType, true
+ }
+ return t, false
+}
+
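+// pathSegment is a reverse-linked list describing where a value lives in the
+// response (field aliases and list indices). toSlice flattens it into the
+// "path" attached to query errors, e.g. ["user", "friends", 3, "name"].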
+type pathSegment struct {
+ parent *pathSegment
+ value interface{}
+}
+
+func (p *pathSegment) toSlice() []interface{} {
+ if p == nil {
+ return nil
+ }
+ return append(p.parent.toSlice(), p.value)
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/packer/packer.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/packer/packer.go
new file mode 100644
index 00000000..c0bb7dc9
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/packer/packer.go
@@ -0,0 +1,390 @@
+package packer
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "strings"
+
+ "github.com/graph-gophers/graphql-go/decode"
+ "github.com/graph-gophers/graphql-go/errors"
+ "github.com/graph-gophers/graphql-go/types"
+)
+
+type packer interface {
+ Pack(value interface{}) (reflect.Value, error)
+}
+
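+// Builder constructs packers that convert JSON-decoded GraphQL input values
+// into the reflect.Values expected by resolver methods. Packers are assigned
+// through placeholder targets; Finish wires the placeholders up and
+// precomputes the default-value struct for every StructPacker.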
+type Builder struct {
+ packerMap map[typePair]*packerMapEntry
+ structPackers []*StructPacker
+}
+
+type typePair struct {
+ graphQLType types.Type
+ resolverType reflect.Type
+}
+
+type packerMapEntry struct {
+ packer packer
+ targets []*packer
+}
+
+func NewBuilder() *Builder {
+ return &Builder{
+ packerMap: make(map[typePair]*packerMapEntry),
+ }
+}
+
+func (b *Builder) Finish() error {
+ for _, entry := range b.packerMap {
+ for _, target := range entry.targets {
+ *target = entry.packer
+ }
+ }
+
+ for _, p := range b.structPackers {
+ p.defaultStruct = reflect.New(p.structType).Elem()
+ for _, f := range p.fields {
+ if defaultVal := f.field.Default; defaultVal != nil {
+ v, err := f.fieldPacker.Pack(defaultVal.Deserialize(nil))
+ if err != nil {
+ return err
+ }
+ p.defaultStruct.FieldByIndex(f.fieldIndex).Set(v)
+ }
+ }
+ }
+
+ return nil
+}
+
+func (b *Builder) assignPacker(target *packer, schemaType types.Type, reflectType reflect.Type) error {
+ k := typePair{schemaType, reflectType}
+ ref, ok := b.packerMap[k]
+ if !ok {
+ ref = &packerMapEntry{}
+ b.packerMap[k] = ref
+ var err error
+ ref.packer, err = b.makePacker(schemaType, reflectType)
+ if err != nil {
+ return err
+ }
+ }
+ ref.targets = append(ref.targets, target)
+ return nil
+}
+
+func (b *Builder) makePacker(schemaType types.Type, reflectType reflect.Type) (packer, error) {
+ t, nonNull := unwrapNonNull(schemaType)
+ if !nonNull {
+ if reflectType.Kind() == reflect.Ptr {
+ elemType := reflectType.Elem()
+ addPtr := true
+ if _, ok := t.(*types.InputObject); ok {
+ elemType = reflectType // keep pointer for input objects
+ addPtr = false
+ }
+ elem, err := b.makeNonNullPacker(t, elemType)
+ if err != nil {
+ return nil, err
+ }
+ return &nullPacker{
+ elemPacker: elem,
+ valueType: reflectType,
+ addPtr: addPtr,
+ }, nil
+ } else if isNullable(reflectType) {
+ elemType := reflectType
+ addPtr := false
+ elem, err := b.makeNonNullPacker(t, elemType)
+ if err != nil {
+ return nil, err
+ }
+ return &nullPacker{
+ elemPacker: elem,
+ valueType: reflectType,
+ addPtr: addPtr,
+ }, nil
+ } else {
+ return nil, fmt.Errorf("%s is not a pointer or a nullable type", reflectType)
+ }
+ }
+
+ return b.makeNonNullPacker(t, reflectType)
+}
+
+func (b *Builder) makeNonNullPacker(schemaType types.Type, reflectType reflect.Type) (packer, error) {
+ if u, ok := reflect.New(reflectType).Interface().(decode.Unmarshaler); ok {
+ if !u.ImplementsGraphQLType(schemaType.String()) {
+ return nil, fmt.Errorf("can not unmarshal %s into %s", schemaType, reflectType)
+ }
+ return &unmarshalerPacker{
+ ValueType: reflectType,
+ }, nil
+ }
+
+ switch t := schemaType.(type) {
+ case *types.ScalarTypeDefinition:
+ return &ValuePacker{
+ ValueType: reflectType,
+ }, nil
+
+ case *types.EnumTypeDefinition:
+ if reflectType.Kind() != reflect.String {
+ return nil, fmt.Errorf("wrong type, expected %s", reflect.String)
+ }
+ return &ValuePacker{
+ ValueType: reflectType,
+ }, nil
+
+ case *types.InputObject:
+ e, err := b.MakeStructPacker(t.Values, reflectType)
+ if err != nil {
+ return nil, err
+ }
+ return e, nil
+
+ case *types.List:
+ if reflectType.Kind() != reflect.Slice {
+ return nil, fmt.Errorf("expected slice, got %s", reflectType)
+ }
+ p := &listPacker{
+ sliceType: reflectType,
+ }
+ if err := b.assignPacker(&p.elem, t.OfType, reflectType.Elem()); err != nil {
+ return nil, err
+ }
+ return p, nil
+
+ case *types.ObjectTypeDefinition, *types.InterfaceTypeDefinition, *types.Union:
+ return nil, fmt.Errorf("type of kind %s can not be used as input", t.Kind())
+
+ default:
+ panic("unreachable")
+ }
+}
+
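+// MakeStructPacker builds a packer that fills a Go struct (or pointer to
+// struct) from GraphQL field arguments or an input object. Struct fields are
+// matched to argument names case-insensitively with underscores ignored, so
+// an argument "maxResults" packs into a field named MaxResults. An
+// illustrative resolver signature (the names are not part of this package):
+//
+//	func (r *Resolver) Search(args struct{ MaxResults int32 }) []*SearchResult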
+func (b *Builder) MakeStructPacker(values []*types.InputValueDefinition, typ reflect.Type) (*StructPacker, error) {
+ structType := typ
+ usePtr := false
+ if typ.Kind() == reflect.Ptr {
+ structType = typ.Elem()
+ usePtr = true
+ }
+ if structType.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("expected struct or pointer to struct, got %s (hint: missing `args struct { ... }` wrapper for field arguments?)", typ)
+ }
+
+ var fields []*structPackerField
+ for _, v := range values {
+ fe := &structPackerField{field: v}
+ fx := func(n string) bool {
+ return strings.EqualFold(stripUnderscore(n), stripUnderscore(v.Name.Name))
+ }
+
+ sf, ok := structType.FieldByNameFunc(fx)
+ if !ok {
+ return nil, fmt.Errorf("%s does not define field %q (hint: missing `args struct { ... }` wrapper for field arguments, or missing field on input struct)", typ, v.Name.Name)
+ }
+ if sf.PkgPath != "" {
+ return nil, fmt.Errorf("field %q must be exported", sf.Name)
+ }
+ fe.fieldIndex = sf.Index
+
+ ft := v.Type
+ if v.Default != nil {
+ ft, _ = unwrapNonNull(ft)
+ ft = &types.NonNull{OfType: ft}
+ }
+
+ if err := b.assignPacker(&fe.fieldPacker, ft, sf.Type); err != nil {
+ return nil, fmt.Errorf("field %q: %s", sf.Name, err)
+ }
+
+ fields = append(fields, fe)
+ }
+
+ p := &StructPacker{
+ structType: structType,
+ usePtr: usePtr,
+ fields: fields,
+ }
+ b.structPackers = append(b.structPackers, p)
+ return p, nil
+}
+
+type StructPacker struct {
+ structType reflect.Type
+ usePtr bool
+ defaultStruct reflect.Value
+ fields []*structPackerField
+}
+
+type structPackerField struct {
+ field *types.InputValueDefinition
+ fieldIndex []int
+ fieldPacker packer
+}
+
+func (p *StructPacker) Pack(value interface{}) (reflect.Value, error) {
+ if value == nil {
+ return reflect.Value{}, errors.Errorf("got null for non-null")
+ }
+
+ values := value.(map[string]interface{})
+ v := reflect.New(p.structType)
+ v.Elem().Set(p.defaultStruct)
+ for _, f := range p.fields {
+ if value, ok := values[f.field.Name.Name]; ok {
+ packed, err := f.fieldPacker.Pack(value)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ v.Elem().FieldByIndex(f.fieldIndex).Set(packed)
+ }
+ }
+ if !p.usePtr {
+ return v.Elem(), nil
+ }
+ return v, nil
+}
+
+type listPacker struct {
+ sliceType reflect.Type
+ elem packer
+}
+
+func (e *listPacker) Pack(value interface{}) (reflect.Value, error) {
+ list, ok := value.([]interface{})
+ if !ok {
+ list = []interface{}{value}
+ }
+
+ v := reflect.MakeSlice(e.sliceType, len(list), len(list))
+ for i := range list {
+ packed, err := e.elem.Pack(list[i])
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ v.Index(i).Set(packed)
+ }
+ return v, nil
+}
+
+type nullPacker struct {
+ elemPacker packer
+ valueType reflect.Type
+ addPtr bool
+}
+
+func (p *nullPacker) Pack(value interface{}) (reflect.Value, error) {
+ if value == nil && !isNullable(p.valueType) {
+ return reflect.Zero(p.valueType), nil
+ }
+
+ v, err := p.elemPacker.Pack(value)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ if p.addPtr {
+ ptr := reflect.New(p.valueType.Elem())
+ ptr.Elem().Set(v)
+ return ptr, nil
+ }
+
+ return v, nil
+}
+
+type ValuePacker struct {
+ ValueType reflect.Type
+}
+
+func (p *ValuePacker) Pack(value interface{}) (reflect.Value, error) {
+ if value == nil {
+ return reflect.Value{}, errors.Errorf("got null for non-null")
+ }
+
+ coerced, err := unmarshalInput(p.ValueType, value)
+ if err != nil {
+ return reflect.Value{}, fmt.Errorf("could not unmarshal %#v (%T) into %s: %s", value, value, p.ValueType, err)
+ }
+ return reflect.ValueOf(coerced), nil
+}
+
+type unmarshalerPacker struct {
+ ValueType reflect.Type
+}
+
+func (p *unmarshalerPacker) Pack(value interface{}) (reflect.Value, error) {
+ if value == nil && !isNullable(p.ValueType) {
+ return reflect.Value{}, errors.Errorf("got null for non-null")
+ }
+
+ v := reflect.New(p.ValueType)
+ if err := v.Interface().(decode.Unmarshaler).UnmarshalGraphQL(value); err != nil {
+ return reflect.Value{}, err
+ }
+ return v.Elem(), nil
+}
+
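+// unmarshalInput coerces a JSON-decoded input value into the target Go kind,
+// mirroring the coercion rules used by this package: Int accepts integral
+// values that fit in 32 bits, Float accepts integers, and String accepts any
+// value whose type is convertible to the target string type.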
+func unmarshalInput(typ reflect.Type, input interface{}) (interface{}, error) {
+ if reflect.TypeOf(input) == typ {
+ return input, nil
+ }
+
+ switch typ.Kind() {
+ case reflect.Int32:
+ switch input := input.(type) {
+ case int:
+ if input < math.MinInt32 || input > math.MaxInt32 {
+ return nil, fmt.Errorf("not a 32-bit integer")
+ }
+ return int32(input), nil
+ case float64:
+ coerced := int32(input)
+ if input < math.MinInt32 || input > math.MaxInt32 || float64(coerced) != input {
+ return nil, fmt.Errorf("not a 32-bit integer")
+ }
+ return coerced, nil
+ }
+
+ case reflect.Float64:
+ switch input := input.(type) {
+ case int32:
+ return float64(input), nil
+ case int:
+ return float64(input), nil
+ }
+
+ case reflect.String:
+ if reflect.TypeOf(input).ConvertibleTo(typ) {
+ return reflect.ValueOf(input).Convert(typ).Interface(), nil
+ }
+ }
+
+ return nil, fmt.Errorf("incompatible type")
+}
+
+func unwrapNonNull(t types.Type) (types.Type, bool) {
+ if nn, ok := t.(*types.NonNull); ok {
+ return nn.OfType, true
+ }
+ return t, false
+}
+
+func stripUnderscore(s string) string {
+ return strings.Replace(s, "_", "", -1)
+}
+
+// NullUnmarshaller is an unmarshaller that can handle a nil input
+type NullUnmarshaller interface {
+ decode.Unmarshaler
+ Nullable()
+}
+
+func isNullable(t reflect.Type) bool {
+ _, ok := reflect.New(t).Interface().(NullUnmarshaller)
+ return ok
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/meta.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/meta.go
new file mode 100644
index 00000000..02d5e262
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/meta.go
@@ -0,0 +1,70 @@
+package resolvable
+
+import (
+ "reflect"
+
+ "github.com/graph-gophers/graphql-go/introspection"
+ "github.com/graph-gophers/graphql-go/types"
+)
+
+// Meta defines the details of the metadata schema for introspection.
+type Meta struct {
+ FieldSchema Field
+ FieldType Field
+ FieldTypename Field
+ Schema *Object
+ Type *Object
+}
+
+func newMeta(s *types.Schema) *Meta {
+ var err error
+ b := newBuilder(s)
+
+ metaSchema := s.Types["__Schema"].(*types.ObjectTypeDefinition)
+ so, err := b.makeObjectExec(metaSchema.Name, metaSchema.Fields, nil, false, reflect.TypeOf(&introspection.Schema{}))
+ if err != nil {
+ panic(err)
+ }
+
+ metaType := s.Types["__Type"].(*types.ObjectTypeDefinition)
+ t, err := b.makeObjectExec(metaType.Name, metaType.Fields, nil, false, reflect.TypeOf(&introspection.Type{}))
+ if err != nil {
+ panic(err)
+ }
+
+ if err := b.finish(); err != nil {
+ panic(err)
+ }
+
+ fieldTypename := Field{
+ FieldDefinition: types.FieldDefinition{
+ Name: "__typename",
+ Type: &types.NonNull{OfType: s.Types["String"]},
+ },
+ TraceLabel: "GraphQL field: __typename",
+ }
+
+ fieldSchema := Field{
+ FieldDefinition: types.FieldDefinition{
+ Name: "__schema",
+ Type: s.Types["__Schema"],
+ },
+ TraceLabel: "GraphQL field: __schema",
+ }
+
+ fieldType := Field{
+ FieldDefinition: types.FieldDefinition{
+ Name: "__type",
+ Type: s.Types["__Type"],
+ },
+ TraceLabel: "GraphQL field: __type",
+ }
+
+ return &Meta{
+ FieldSchema: fieldSchema,
+ FieldTypename: fieldTypename,
+ FieldType: fieldType,
+ Schema: so,
+ Type: t,
+ }
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/resolvable.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/resolvable.go
new file mode 100644
index 00000000..3410f557
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/resolvable.go
@@ -0,0 +1,453 @@
+package resolvable
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/graph-gophers/graphql-go/decode"
+ "github.com/graph-gophers/graphql-go/internal/exec/packer"
+ "github.com/graph-gophers/graphql-go/types"
+)
+
+type Schema struct {
+ *Meta
+ types.Schema
+ Query Resolvable
+ Mutation Resolvable
+ Subscription Resolvable
+ Resolver reflect.Value
+}
+
+type Resolvable interface {
+ isResolvable()
+}
+
+type Object struct {
+ Name string
+ Fields map[string]*Field
+ TypeAssertions map[string]*TypeAssertion
+}
+
+type Field struct {
+ types.FieldDefinition
+ TypeName string
+ MethodIndex int
+ FieldIndex []int
+ HasContext bool
+ HasError bool
+ ArgsPacker *packer.StructPacker
+ ValueExec Resolvable
+ TraceLabel string
+}
+
+func (f *Field) UseMethodResolver() bool {
+ return len(f.FieldIndex) == 0
+}
+
+type TypeAssertion struct {
+ MethodIndex int
+ TypeExec Resolvable
+}
+
+type List struct {
+ Elem Resolvable
+}
+
+type Scalar struct{}
+
+func (*Object) isResolvable() {}
+func (*List) isResolvable() {}
+func (*Scalar) isResolvable() {}
+
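+// ApplyResolver binds a root resolver value to the parsed schema and builds
+// an executable representation for the query, mutation and subscription
+// entry points. A nil resolver yields a schema that carries only the
+// introspection metadata (Query, Mutation and Subscription stay nil).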
+func ApplyResolver(s *types.Schema, resolver interface{}) (*Schema, error) {
+ if resolver == nil {
+ return &Schema{Meta: newMeta(s), Schema: *s}, nil
+ }
+
+ b := newBuilder(s)
+
+ var query, mutation, subscription Resolvable
+
+ if t, ok := s.EntryPoints["query"]; ok {
+ if err := b.assignExec(&query, t, reflect.TypeOf(resolver)); err != nil {
+ return nil, err
+ }
+ }
+
+ if t, ok := s.EntryPoints["mutation"]; ok {
+ if err := b.assignExec(&mutation, t, reflect.TypeOf(resolver)); err != nil {
+ return nil, err
+ }
+ }
+
+ if t, ok := s.EntryPoints["subscription"]; ok {
+ if err := b.assignExec(&subscription, t, reflect.TypeOf(resolver)); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := b.finish(); err != nil {
+ return nil, err
+ }
+
+ return &Schema{
+ Meta: newMeta(s),
+ Schema: *s,
+ Resolver: reflect.ValueOf(resolver),
+ Query: query,
+ Mutation: mutation,
+ Subscription: subscription,
+ }, nil
+}
+
+type execBuilder struct {
+ schema *types.Schema
+ resMap map[typePair]*resMapEntry
+ packerBuilder *packer.Builder
+}
+
+type typePair struct {
+ graphQLType types.Type
+ resolverType reflect.Type
+}
+
+type resMapEntry struct {
+ exec Resolvable
+ targets []*Resolvable
+}
+
+func newBuilder(s *types.Schema) *execBuilder {
+ return &execBuilder{
+ schema: s,
+ resMap: make(map[typePair]*resMapEntry),
+ packerBuilder: packer.NewBuilder(),
+ }
+}
+
+func (b *execBuilder) finish() error {
+ for _, entry := range b.resMap {
+ for _, target := range entry.targets {
+ *target = entry.exec
+ }
+ }
+
+ return b.packerBuilder.Finish()
+}
+
+func (b *execBuilder) assignExec(target *Resolvable, t types.Type, resolverType reflect.Type) error {
+ k := typePair{t, resolverType}
+ ref, ok := b.resMap[k]
+ if !ok {
+ ref = &resMapEntry{}
+ b.resMap[k] = ref
+ var err error
+ ref.exec, err = b.makeExec(t, resolverType)
+ if err != nil {
+ return err
+ }
+ }
+ ref.targets = append(ref.targets, target)
+ return nil
+}
+
+func (b *execBuilder) makeExec(t types.Type, resolverType reflect.Type) (Resolvable, error) {
+ var nonNull bool
+ t, nonNull = unwrapNonNull(t)
+
+ switch t := t.(type) {
+ case *types.ObjectTypeDefinition:
+ return b.makeObjectExec(t.Name, t.Fields, nil, nonNull, resolverType)
+
+ case *types.InterfaceTypeDefinition:
+ return b.makeObjectExec(t.Name, t.Fields, t.PossibleTypes, nonNull, resolverType)
+
+ case *types.Union:
+ return b.makeObjectExec(t.Name, nil, t.UnionMemberTypes, nonNull, resolverType)
+ }
+
+ if !nonNull {
+ if resolverType.Kind() != reflect.Ptr {
+ return nil, fmt.Errorf("%s is not a pointer", resolverType)
+ }
+ resolverType = resolverType.Elem()
+ }
+
+ switch t := t.(type) {
+ case *types.ScalarTypeDefinition:
+ return makeScalarExec(t, resolverType)
+
+ case *types.EnumTypeDefinition:
+ return &Scalar{}, nil
+
+ case *types.List:
+ if resolverType.Kind() != reflect.Slice {
+ return nil, fmt.Errorf("%s is not a slice", resolverType)
+ }
+ e := &List{}
+ if err := b.assignExec(&e.Elem, t.OfType, resolverType.Elem()); err != nil {
+ return nil, err
+ }
+ return e, nil
+
+ default:
+ panic("invalid type: " + t.String())
+ }
+}
+
+func makeScalarExec(t *types.ScalarTypeDefinition, resolverType reflect.Type) (Resolvable, error) {
+ implementsType := false
+ switch r := reflect.New(resolverType).Interface().(type) {
+ case *int32:
+ implementsType = t.Name == "Int"
+ case *float64:
+ implementsType = t.Name == "Float"
+ case *string:
+ implementsType = t.Name == "String"
+ case *bool:
+ implementsType = t.Name == "Boolean"
+ case decode.Unmarshaler:
+ implementsType = r.ImplementsGraphQLType(t.Name)
+ }
+
+ if !implementsType {
+ return nil, fmt.Errorf("can not use %s as %s", resolverType, t.Name)
+ }
+ return &Scalar{}, nil
+}
+
+func (b *execBuilder) makeObjectExec(typeName string, fields types.FieldsDefinition, possibleTypes []*types.ObjectTypeDefinition,
+ nonNull bool, resolverType reflect.Type) (*Object, error) {
+ if !nonNull {
+ if resolverType.Kind() != reflect.Ptr && resolverType.Kind() != reflect.Interface {
+ return nil, fmt.Errorf("%s is not a pointer or interface", resolverType)
+ }
+ }
+
+ methodHasReceiver := resolverType.Kind() != reflect.Interface
+
+ Fields := make(map[string]*Field)
+ rt := unwrapPtr(resolverType)
+ fieldsCount := fieldCount(rt, map[string]int{})
+ for _, f := range fields {
+ var fieldIndex []int
+ methodIndex := findMethod(resolverType, f.Name)
+ if b.schema.UseFieldResolvers && methodIndex == -1 {
+ if fieldsCount[strings.ToLower(stripUnderscore(f.Name))] > 1 {
+ return nil, fmt.Errorf("%s does not resolve %q: ambiguous field %q", resolverType, typeName, f.Name)
+ }
+ fieldIndex = findField(rt, f.Name, []int{})
+ }
+ if methodIndex == -1 && len(fieldIndex) == 0 {
+ hint := ""
+ if findMethod(reflect.PtrTo(resolverType), f.Name) != -1 {
+ hint = " (hint: the method exists on the pointer type)"
+ }
+ return nil, fmt.Errorf("%s does not resolve %q: missing method for field %q%s", resolverType, typeName, f.Name, hint)
+ }
+
+ var m reflect.Method
+ var sf reflect.StructField
+ if methodIndex != -1 {
+ m = resolverType.Method(methodIndex)
+ } else {
+ sf = rt.FieldByIndex(fieldIndex)
+ }
+ fe, err := b.makeFieldExec(typeName, f, m, sf, methodIndex, fieldIndex, methodHasReceiver)
+ if err != nil {
+ var resolverName string
+ if methodIndex != -1 {
+ resolverName = m.Name
+ } else {
+ resolverName = sf.Name
+ }
+ return nil, fmt.Errorf("%s\n\tused by (%s).%s", err, resolverType, resolverName)
+ }
+ Fields[f.Name] = fe
+ }
+
+ // Check type assertions when
+ // 1) using method resolvers, or
+ // 2) the resolver is not an interface type
+ typeAssertions := make(map[string]*TypeAssertion)
+ if !b.schema.UseFieldResolvers || resolverType.Kind() != reflect.Interface {
+ for _, impl := range possibleTypes {
+ methodIndex := findMethod(resolverType, "To"+impl.Name)
+ if methodIndex == -1 {
+ return nil, fmt.Errorf("%s does not resolve %q: missing method %q to convert to %q", resolverType, typeName, "To"+impl.Name, impl.Name)
+ }
+ if resolverType.Method(methodIndex).Type.NumOut() != 2 {
+ return nil, fmt.Errorf("%s does not resolve %q: method %q should return a value and a bool indicating success", resolverType, typeName, "To"+impl.Name)
+ }
+ a := &TypeAssertion{
+ MethodIndex: methodIndex,
+ }
+ if err := b.assignExec(&a.TypeExec, impl, resolverType.Method(methodIndex).Type.Out(0)); err != nil {
+ return nil, err
+ }
+ typeAssertions[impl.Name] = a
+ }
+ }
+
+ return &Object{
+ Name: typeName,
+ Fields: Fields,
+ TypeAssertions: typeAssertions,
+ }, nil
+}
+
+var contextType = reflect.TypeOf((*context.Context)(nil)).Elem()
+var errorType = reflect.TypeOf((*error)(nil)).Elem()
+
+func (b *execBuilder) makeFieldExec(typeName string, f *types.FieldDefinition, m reflect.Method, sf reflect.StructField,
+ methodIndex int, fieldIndex []int, methodHasReceiver bool) (*Field, error) {
+
+ var argsPacker *packer.StructPacker
+ var hasError bool
+ var hasContext bool
+
+ // Validate resolver method only when there is one
+ if methodIndex != -1 {
+ in := make([]reflect.Type, m.Type.NumIn())
+ for i := range in {
+ in[i] = m.Type.In(i)
+ }
+ if methodHasReceiver {
+ in = in[1:] // first parameter is receiver
+ }
+
+ hasContext = len(in) > 0 && in[0] == contextType
+ if hasContext {
+ in = in[1:]
+ }
+
+ if len(f.Arguments) > 0 {
+ if len(in) == 0 {
+ return nil, fmt.Errorf("must have parameter for field arguments")
+ }
+ var err error
+ argsPacker, err = b.packerBuilder.MakeStructPacker(f.Arguments, in[0])
+ if err != nil {
+ return nil, err
+ }
+ in = in[1:]
+ }
+
+ if len(in) > 0 {
+ return nil, fmt.Errorf("too many parameters")
+ }
+
+ maxNumOfReturns := 2
+ if m.Type.NumOut() < maxNumOfReturns-1 {
+ return nil, fmt.Errorf("too few return values")
+ }
+
+ if m.Type.NumOut() > maxNumOfReturns {
+ return nil, fmt.Errorf("too many return values")
+ }
+
+ hasError = m.Type.NumOut() == maxNumOfReturns
+ if hasError {
+ if m.Type.Out(maxNumOfReturns-1) != errorType {
+ return nil, fmt.Errorf(`must have "error" as its last return value`)
+ }
+ }
+ }
+
+ fe := &Field{
+ FieldDefinition: *f,
+ TypeName: typeName,
+ MethodIndex: methodIndex,
+ FieldIndex: fieldIndex,
+ HasContext: hasContext,
+ ArgsPacker: argsPacker,
+ HasError: hasError,
+ TraceLabel: fmt.Sprintf("GraphQL field: %s.%s", typeName, f.Name),
+ }
+
+ var out reflect.Type
+ if methodIndex != -1 {
+ out = m.Type.Out(0)
+ sub, ok := b.schema.EntryPoints["subscription"]
+ if ok && typeName == sub.TypeName() && out.Kind() == reflect.Chan {
+ out = m.Type.Out(0).Elem()
+ }
+ } else {
+ out = sf.Type
+ }
+ if err := b.assignExec(&fe.ValueExec, f.Type, out); err != nil {
+ return nil, err
+ }
+
+ return fe, nil
+}
+
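+// findMethod locates the resolver method for a schema field by comparing
+// names case-insensitively with underscores stripped, so the field
+// "created_at" can be resolved by a method named CreatedAt.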
+func findMethod(t reflect.Type, name string) int {
+ for i := 0; i < t.NumMethod(); i++ {
+ if strings.EqualFold(stripUnderscore(name), stripUnderscore(t.Method(i).Name)) {
+ return i
+ }
+ }
+ return -1
+}
+
+func findField(t reflect.Type, name string, index []int) []int {
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+
+ if field.Type.Kind() == reflect.Struct && field.Anonymous {
+ newIndex := findField(field.Type, name, []int{i})
+ if len(newIndex) > 1 {
+ return append(index, newIndex...)
+ }
+ }
+
+ if strings.EqualFold(stripUnderscore(name), stripUnderscore(field.Name)) {
+ return append(index, i)
+ }
+ }
+
+ return index
+}
+
+// fieldCount helps resolve ambiguity when more than one embedded struct contains fields with the same name.
+func fieldCount(t reflect.Type, count map[string]int) map[string]int {
+ if t.Kind() != reflect.Struct {
+ return nil
+ }
+
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ fieldName := strings.ToLower(stripUnderscore(field.Name))
+
+ if field.Type.Kind() == reflect.Struct && field.Anonymous {
+ count = fieldCount(field.Type, count)
+ } else {
+ if _, ok := count[fieldName]; !ok {
+ count[fieldName] = 0
+ }
+ count[fieldName]++
+ }
+ }
+
+ return count
+}
+
+func unwrapNonNull(t types.Type) (types.Type, bool) {
+ if nn, ok := t.(*types.NonNull); ok {
+ return nn.OfType, true
+ }
+ return t, false
+}
+
+func stripUnderscore(s string) string {
+ return strings.Replace(s, "_", "", -1)
+}
+
+func unwrapPtr(t reflect.Type) reflect.Type {
+ if t.Kind() == reflect.Ptr {
+ return t.Elem()
+ }
+ return t
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/selected/selected.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/selected/selected.go
new file mode 100644
index 00000000..9b96d2b6
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/selected/selected.go
@@ -0,0 +1,269 @@
+package selected
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+
+ "github.com/graph-gophers/graphql-go/errors"
+ "github.com/graph-gophers/graphql-go/internal/exec/packer"
+ "github.com/graph-gophers/graphql-go/internal/exec/resolvable"
+ "github.com/graph-gophers/graphql-go/internal/query"
+ "github.com/graph-gophers/graphql-go/introspection"
+ "github.com/graph-gophers/graphql-go/types"
+)
+
+type Request struct {
+ Schema *types.Schema
+ Doc *types.ExecutableDefinition
+ Vars map[string]interface{}
+ Mu sync.Mutex
+ Errs []*errors.QueryError
+ DisableIntrospection bool
+}
+
+func (r *Request) AddError(err *errors.QueryError) {
+ r.Mu.Lock()
+ r.Errs = append(r.Errs, err)
+ r.Mu.Unlock()
+}
+
+func ApplyOperation(r *Request, s *resolvable.Schema, op *types.OperationDefinition) []Selection {
+ var obj *resolvable.Object
+ switch op.Type {
+ case query.Query:
+ obj = s.Query.(*resolvable.Object)
+ case query.Mutation:
+ obj = s.Mutation.(*resolvable.Object)
+ case query.Subscription:
+ obj = s.Subscription.(*resolvable.Object)
+ }
+ return applySelectionSet(r, s, obj, op.Selections)
+}
+
+type Selection interface {
+ isSelection()
+}
+
+type SchemaField struct {
+ resolvable.Field
+ Alias string
+ Args map[string]interface{}
+ PackedArgs reflect.Value
+ Sels []Selection
+ Async bool
+ FixedResult reflect.Value
+}
+
+type TypeAssertion struct {
+ resolvable.TypeAssertion
+ Sels []Selection
+}
+
+type TypenameField struct {
+ resolvable.Object
+ Alias string
+}
+
+func (*SchemaField) isSelection() {}
+func (*TypeAssertion) isSelection() {}
+func (*TypenameField) isSelection() {}
+
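+// applySelectionSet flattens a parsed selection set into executable
+// selections: it evaluates @skip/@include directives, expands fragment
+// spreads and inline fragments, packs field arguments, and handles the
+// introspection fields __typename, __schema and __type.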
+func applySelectionSet(r *Request, s *resolvable.Schema, e *resolvable.Object, sels []types.Selection) (flattenedSels []Selection) {
+ for _, sel := range sels {
+ switch sel := sel.(type) {
+ case *types.Field:
+ field := sel
+ if skipByDirective(r, field.Directives) {
+ continue
+ }
+
+ switch field.Name.Name {
+ case "__typename":
+ // __typename is available even when r.DisableIntrospection is true,
+ // because it is needed to discriminate union and interface types: https://graphql.org/learn/schema/#union-types
+ flattenedSels = append(flattenedSels, &TypenameField{
+ Object: *e,
+ Alias: field.Alias.Name,
+ })
+
+ case "__schema":
+ if !r.DisableIntrospection {
+ flattenedSels = append(flattenedSels, &SchemaField{
+ Field: s.Meta.FieldSchema,
+ Alias: field.Alias.Name,
+ Sels: applySelectionSet(r, s, s.Meta.Schema, field.SelectionSet),
+ Async: true,
+ FixedResult: reflect.ValueOf(introspection.WrapSchema(r.Schema)),
+ })
+ }
+
+ case "__type":
+ if !r.DisableIntrospection {
+ p := packer.ValuePacker{ValueType: reflect.TypeOf("")}
+ v, err := p.Pack(field.Arguments.MustGet("name").Deserialize(r.Vars))
+ if err != nil {
+ r.AddError(errors.Errorf("%s", err))
+ return nil
+ }
+
+ t, ok := r.Schema.Types[v.String()]
+ if !ok {
+ return nil
+ }
+
+ flattenedSels = append(flattenedSels, &SchemaField{
+ Field: s.Meta.FieldType,
+ Alias: field.Alias.Name,
+ Sels: applySelectionSet(r, s, s.Meta.Type, field.SelectionSet),
+ Async: true,
+ FixedResult: reflect.ValueOf(introspection.WrapType(t)),
+ })
+ }
+
+ default:
+ fe := e.Fields[field.Name.Name]
+
+ var args map[string]interface{}
+ var packedArgs reflect.Value
+ if fe.ArgsPacker != nil {
+ args = make(map[string]interface{})
+ for _, arg := range field.Arguments {
+ args[arg.Name.Name] = arg.Value.Deserialize(r.Vars)
+ }
+ var err error
+ packedArgs, err = fe.ArgsPacker.Pack(args)
+ if err != nil {
+ r.AddError(errors.Errorf("%s", err))
+ return
+ }
+ }
+
+ fieldSels := applyField(r, s, fe.ValueExec, field.SelectionSet)
+ flattenedSels = append(flattenedSels, &SchemaField{
+ Field: *fe,
+ Alias: field.Alias.Name,
+ Args: args,
+ PackedArgs: packedArgs,
+ Sels: fieldSels,
+ Async: fe.HasContext || fe.ArgsPacker != nil || fe.HasError || HasAsyncSel(fieldSels),
+ })
+ }
+
+ case *types.InlineFragment:
+ frag := sel
+ if skipByDirective(r, frag.Directives) {
+ continue
+ }
+ flattenedSels = append(flattenedSels, applyFragment(r, s, e, &frag.Fragment)...)
+
+ case *types.FragmentSpread:
+ spread := sel
+ if skipByDirective(r, spread.Directives) {
+ continue
+ }
+ flattenedSels = append(flattenedSels, applyFragment(r, s, e, &r.Doc.Fragments.Get(spread.Name.Name).Fragment)...)
+
+ default:
+ panic("invalid type")
+ }
+ }
+ return
+}
+
+func applyFragment(r *Request, s *resolvable.Schema, e *resolvable.Object, frag *types.Fragment) []Selection {
+ if frag.On.Name != e.Name {
+ t := r.Schema.Resolve(frag.On.Name)
+ face, ok := t.(*types.InterfaceTypeDefinition)
+ if !ok && frag.On.Name != "" {
+ a, ok2 := e.TypeAssertions[frag.On.Name]
+ if !ok2 {
+ panic(fmt.Errorf("%q does not implement %q", frag.On, e.Name)) // TODO proper error handling
+ }
+
+ return []Selection{&TypeAssertion{
+ TypeAssertion: *a,
+ Sels: applySelectionSet(r, s, a.TypeExec.(*resolvable.Object), frag.Selections),
+ }}
+ }
+ if ok && len(face.PossibleTypes) > 0 {
+ sels := []Selection{}
+ for _, t := range face.PossibleTypes {
+ if t.Name == e.Name {
+ return applySelectionSet(r, s, e, frag.Selections)
+ }
+
+ if a, ok := e.TypeAssertions[t.Name]; ok {
+ sels = append(sels, &TypeAssertion{
+ TypeAssertion: *a,
+ Sels: applySelectionSet(r, s, a.TypeExec.(*resolvable.Object), frag.Selections),
+ })
+ }
+ }
+ if len(sels) == 0 {
+ panic(fmt.Errorf("%q does not implement %q", e.Name, frag.On)) // TODO proper error handling
+ }
+ return sels
+ }
+ }
+ return applySelectionSet(r, s, e, frag.Selections)
+}
+
+func applyField(r *Request, s *resolvable.Schema, e resolvable.Resolvable, sels []types.Selection) []Selection {
+ switch e := e.(type) {
+ case *resolvable.Object:
+ return applySelectionSet(r, s, e, sels)
+ case *resolvable.List:
+ return applyField(r, s, e.Elem, sels)
+ case *resolvable.Scalar:
+ return nil
+ default:
+ panic("unreachable")
+ }
+}
+
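+// skipByDirective reports whether a selection should be omitted based on its
+// @skip and @include directives; e.g. a field written as
+// `field @include(if: $showField)` is dropped when the variable is false.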
+func skipByDirective(r *Request, directives types.DirectiveList) bool {
+ if d := directives.Get("skip"); d != nil {
+ p := packer.ValuePacker{ValueType: reflect.TypeOf(false)}
+ v, err := p.Pack(d.Arguments.MustGet("if").Deserialize(r.Vars))
+ if err != nil {
+ r.AddError(errors.Errorf("%s", err))
+ }
+ if err == nil && v.Bool() {
+ return true
+ }
+ }
+
+ if d := directives.Get("include"); d != nil {
+ p := packer.ValuePacker{ValueType: reflect.TypeOf(false)}
+ v, err := p.Pack(d.Arguments.MustGet("if").Deserialize(r.Vars))
+ if err != nil {
+ r.AddError(errors.Errorf("%s", err))
+ }
+ if err == nil && !v.Bool() {
+ return true
+ }
+ }
+
+ return false
+}
+
+func HasAsyncSel(sels []Selection) bool {
+ for _, sel := range sels {
+ switch sel := sel.(type) {
+ case *SchemaField:
+ if sel.Async {
+ return true
+ }
+ case *TypeAssertion:
+ if HasAsyncSel(sel.Sels) {
+ return true
+ }
+ case *TypenameField:
+ // sync
+ default:
+ panic("unreachable")
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/subscribe.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/subscribe.go
new file mode 100644
index 00000000..37ebacbc
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/subscribe.go
@@ -0,0 +1,179 @@
+package exec
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/graph-gophers/graphql-go/errors"
+ "github.com/graph-gophers/graphql-go/internal/exec/resolvable"
+ "github.com/graph-gophers/graphql-go/internal/exec/selected"
+ "github.com/graph-gophers/graphql-go/types"
+)
+
+type Response struct {
+ Data json.RawMessage
+ Errors []*errors.QueryError
+}
+
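+// Subscribe executes a subscription operation: the single subscription
+// resolver is invoked once and is expected to return a channel, and every
+// value received from that channel is resolved against the selection set
+// (bounded by SubscribeResolverTimeout, defaulting to one second) and sent as
+// a Response on the returned channel until either side closes or the context
+// is cancelled.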
+func (r *Request) Subscribe(ctx context.Context, s *resolvable.Schema, op *types.OperationDefinition) <-chan *Response {
+ var result reflect.Value
+ var f *fieldToExec
+ var err *errors.QueryError
+ func() {
+ defer r.handlePanic(ctx)
+
+ sels := selected.ApplyOperation(&r.Request, s, op)
+ var fields []*fieldToExec
+ collectFieldsToResolve(sels, s, s.Resolver, &fields, make(map[string]*fieldToExec))
+
+ // TODO: move this check into validation.Validate
+ if len(fields) != 1 {
+ err = errors.Errorf("%s", "can subscribe to at most one subscription at a time")
+ return
+ }
+ f = fields[0]
+
+ var in []reflect.Value
+ if f.field.HasContext {
+ in = append(in, reflect.ValueOf(ctx))
+ }
+ if f.field.ArgsPacker != nil {
+ in = append(in, f.field.PackedArgs)
+ }
+ callOut := f.resolver.Method(f.field.MethodIndex).Call(in)
+ result = callOut[0]
+
+ if f.field.HasError && !callOut[1].IsNil() {
+ switch resolverErr := callOut[1].Interface().(type) {
+ case *errors.QueryError:
+ err = resolverErr
+ case error:
+ err = errors.Errorf("%s", resolverErr)
+ err.ResolverError = resolverErr
+ default:
+ panic(fmt.Errorf("can only deal with *QueryError and error types, got %T", resolverErr))
+ }
+ }
+ }()
+
+ // Handles the case where the locally executed func above panicked
+ if len(r.Request.Errs) > 0 {
+ return sendAndReturnClosed(&Response{Errors: r.Request.Errs})
+ }
+
+ if f == nil {
+ return sendAndReturnClosed(&Response{Errors: []*errors.QueryError{err}})
+ }
+
+ if err != nil {
+ if _, nonNullChild := f.field.Type.(*types.NonNull); nonNullChild {
+ return sendAndReturnClosed(&Response{Errors: []*errors.QueryError{err}})
+ }
+ return sendAndReturnClosed(&Response{Data: []byte(fmt.Sprintf(`{"%s":null}`, f.field.Alias)), Errors: []*errors.QueryError{err}})
+ }
+
+ if ctxErr := ctx.Err(); ctxErr != nil {
+ return sendAndReturnClosed(&Response{Errors: []*errors.QueryError{errors.Errorf("%s", ctxErr)}})
+ }
+
+ c := make(chan *Response)
+ // TODO: handle resolver nil channel better?
+ if result.IsZero() {
+ close(c)
+ return c
+ }
+
+ go func() {
+ for {
+ // Check subscription context
+ chosen, resp, ok := reflect.Select([]reflect.SelectCase{
+ {
+ Dir: reflect.SelectRecv,
+ Chan: reflect.ValueOf(ctx.Done()),
+ },
+ {
+ Dir: reflect.SelectRecv,
+ Chan: result,
+ },
+ })
+ switch chosen {
+ // subscription context done
+ case 0:
+ close(c)
+ return
+ // upstream received
+ case 1:
+ // upstream closed
+ if !ok {
+ close(c)
+ return
+ }
+
+ subR := &Request{
+ Request: selected.Request{
+ Doc: r.Request.Doc,
+ Vars: r.Request.Vars,
+ Schema: r.Request.Schema,
+ },
+ Limiter: r.Limiter,
+ Tracer: r.Tracer,
+ Logger: r.Logger,
+ }
+ var out bytes.Buffer
+ func() {
+ timeout := r.SubscribeResolverTimeout
+ if timeout == 0 {
+ timeout = time.Second
+ }
+
+ subCtx, cancel := context.WithTimeout(ctx, timeout)
+ defer cancel()
+
+ // resolve response
+ func() {
+ defer subR.handlePanic(subCtx)
+
+ var buf bytes.Buffer
+ subR.execSelectionSet(subCtx, f.sels, f.field.Type, &pathSegment{nil, f.field.Alias}, s, resp, &buf)
+
+ propagateChildError := false
+ if _, nonNullChild := f.field.Type.(*types.NonNull); nonNullChild && resolvedToNull(&buf) {
+ propagateChildError = true
+ }
+
+ if !propagateChildError {
+ out.WriteString(fmt.Sprintf(`{"%s":`, f.field.Alias))
+ out.Write(buf.Bytes())
+ out.WriteString(`}`)
+ }
+ }()
+
+ if err := subCtx.Err(); err != nil {
+ c <- &Response{Errors: []*errors.QueryError{errors.Errorf("%s", err)}}
+ return
+ }
+
+ // Send response within timeout
+ // TODO: maybe block until sent?
+ select {
+ case <-subCtx.Done():
+ case c <- &Response{Data: out.Bytes(), Errors: subR.Errs}:
+ }
+ }()
+ }
+ }
+ }()
+
+ return c
+}
+
+func sendAndReturnClosed(resp *Response) chan *Response {
+ c := make(chan *Response, 1)
+ c <- resp
+ close(c)
+ return c
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/query/query.go b/vendor/github.com/graph-gophers/graphql-go/internal/query/query.go
new file mode 100644
index 00000000..ca0400cd
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/query/query.go
@@ -0,0 +1,156 @@
+package query
+
+import (
+ "fmt"
+ "text/scanner"
+
+ "github.com/graph-gophers/graphql-go/errors"
+ "github.com/graph-gophers/graphql-go/internal/common"
+ "github.com/graph-gophers/graphql-go/types"
+)
+
+const (
+ Query types.OperationType = "QUERY"
+ Mutation types.OperationType = "MUTATION"
+ Subscription types.OperationType = "SUBSCRIPTION"
+)
+
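+// Parse turns a GraphQL query document, e.g. `query { user(id: "4") { name } }`,
+// into an ExecutableDefinition holding its operations and fragments, returning
+// a QueryError if the document has a syntax error.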
+func Parse(queryString string) (*types.ExecutableDefinition, *errors.QueryError) {
+ l := common.NewLexer(queryString, false)
+
+ var execDef *types.ExecutableDefinition
+ err := l.CatchSyntaxError(func() { execDef = parseExecutableDefinition(l) })
+ if err != nil {
+ return nil, err
+ }
+
+ return execDef, nil
+}
+
+func parseExecutableDefinition(l *common.Lexer) *types.ExecutableDefinition {
+ ed := &types.ExecutableDefinition{}
+ l.ConsumeWhitespace()
+ for l.Peek() != scanner.EOF {
+ if l.Peek() == '{' {
+ op := &types.OperationDefinition{Type: Query, Loc: l.Location()}
+ op.Selections = parseSelectionSet(l)
+ ed.Operations = append(ed.Operations, op)
+ continue
+ }
+
+ loc := l.Location()
+ switch x := l.ConsumeIdent(); x {
+ case "query":
+ op := parseOperation(l, Query)
+ op.Loc = loc
+ ed.Operations = append(ed.Operations, op)
+
+ case "mutation":
+ ed.Operations = append(ed.Operations, parseOperation(l, Mutation))
+
+ case "subscription":
+ ed.Operations = append(ed.Operations, parseOperation(l, Subscription))
+
+ case "fragment":
+ frag := parseFragment(l)
+ frag.Loc = loc
+ ed.Fragments = append(ed.Fragments, frag)
+
+ default:
+ l.SyntaxError(fmt.Sprintf(`unexpected %q, expecting "fragment"`, x))
+ }
+ }
+ return ed
+}
+
+func parseOperation(l *common.Lexer, opType types.OperationType) *types.OperationDefinition {
+ op := &types.OperationDefinition{Type: opType}
+ op.Name.Loc = l.Location()
+ if l.Peek() == scanner.Ident {
+ op.Name = l.ConsumeIdentWithLoc()
+ }
+ op.Directives = common.ParseDirectives(l)
+ if l.Peek() == '(' {
+ l.ConsumeToken('(')
+ for l.Peek() != ')' {
+ loc := l.Location()
+ l.ConsumeToken('$')
+ iv := common.ParseInputValue(l)
+ iv.Loc = loc
+ op.Vars = append(op.Vars, iv)
+ }
+ l.ConsumeToken(')')
+ }
+ op.Selections = parseSelectionSet(l)
+ return op
+}
+
+func parseFragment(l *common.Lexer) *types.FragmentDefinition {
+ f := &types.FragmentDefinition{}
+ f.Name = l.ConsumeIdentWithLoc()
+ l.ConsumeKeyword("on")
+ f.On = types.TypeName{Ident: l.ConsumeIdentWithLoc()}
+ f.Directives = common.ParseDirectives(l)
+ f.Selections = parseSelectionSet(l)
+ return f
+}
+
+func parseSelectionSet(l *common.Lexer) []types.Selection {
+ var sels []types.Selection
+ l.ConsumeToken('{')
+ for l.Peek() != '}' {
+ sels = append(sels, parseSelection(l))
+ }
+ l.ConsumeToken('}')
+ return sels
+}
+
+func parseSelection(l *common.Lexer) types.Selection {
+ if l.Peek() == '.' {
+ return parseSpread(l)
+ }
+ return parseFieldDef(l)
+}
+
+func parseFieldDef(l *common.Lexer) *types.Field {
+ f := &types.Field{}
+ f.Alias = l.ConsumeIdentWithLoc()
+ f.Name = f.Alias
+ if l.Peek() == ':' {
+ l.ConsumeToken(':')
+ f.Name = l.ConsumeIdentWithLoc()
+ }
+ if l.Peek() == '(' {
+ f.Arguments = common.ParseArgumentList(l)
+ }
+ f.Directives = common.ParseDirectives(l)
+ if l.Peek() == '{' {
+ f.SelectionSetLoc = l.Location()
+ f.SelectionSet = parseSelectionSet(l)
+ }
+ return f
+}
+
+func parseSpread(l *common.Lexer) types.Selection {
+ loc := l.Location()
+ l.ConsumeToken('.')
+ l.ConsumeToken('.')
+ l.ConsumeToken('.')
+
+ f := &types.InlineFragment{Loc: loc}
+ if l.Peek() == scanner.Ident {
+ ident := l.ConsumeIdentWithLoc()
+ if ident.Name != "on" {
+ fs := &types.FragmentSpread{
+ Name: ident,
+ Loc: loc,
+ }
+ fs.Directives = common.ParseDirectives(l)
+ return fs
+ }
+ f.On = types.TypeName{Ident: l.ConsumeIdentWithLoc()}
+ }
+ f.Directives = common.ParseDirectives(l)
+ f.Selections = parseSelectionSet(l)
+ return f
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/schema/meta.go b/vendor/github.com/graph-gophers/graphql-go/internal/schema/meta.go
new file mode 100644
index 00000000..9f5bba56
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/schema/meta.go
@@ -0,0 +1,203 @@
+package schema
+
+import (
+ "github.com/graph-gophers/graphql-go/types"
+)
+
+func init() {
+ _ = newMeta()
+}
+
+// newMeta initializes an instance of the meta Schema.
+func newMeta() *types.Schema {
+ s := &types.Schema{
+ EntryPointNames: make(map[string]string),
+ Types: make(map[string]types.NamedType),
+ Directives: make(map[string]*types.DirectiveDefinition),
+ }
+
+ err := Parse(s, metaSrc, false)
+ if err != nil {
+ panic(err)
+ }
+ return s
+}
+
+var metaSrc = `
+ # The ` + "`" + `Int` + "`" + ` scalar type represents non-fractional signed whole numeric values. Int can represent values between -(2^31) and 2^31 - 1.
+ scalar Int
+
+ # The ` + "`" + `Float` + "`" + ` scalar type represents signed double-precision fractional values as specified by [IEEE 754](http://en.wikipedia.org/wiki/IEEE_floating_point).
+ scalar Float
+
+ # The ` + "`" + `String` + "`" + ` scalar type represents textual data, represented as UTF-8 character sequences. The String type is most often used by GraphQL to represent free-form human-readable text.
+ scalar String
+
+ # The ` + "`" + `Boolean` + "`" + ` scalar type represents ` + "`" + `true` + "`" + ` or ` + "`" + `false` + "`" + `.
+ scalar Boolean
+
+ # The ` + "`" + `ID` + "`" + ` scalar type represents a unique identifier, often used to refetch an object or as key for a cache. The ID type appears in a JSON response as a String; however, it is not intended to be human-readable. When expected as an input type, any string (such as ` + "`" + `"4"` + "`" + `) or integer (such as ` + "`" + `4` + "`" + `) input value will be accepted as an ID.
+ scalar ID
+
+ # Directs the executor to include this field or fragment only when the ` + "`" + `if` + "`" + ` argument is true.
+ directive @include(
+ # Included when true.
+ if: Boolean!
+ ) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
+
+ # Directs the executor to skip this field or fragment when the ` + "`" + `if` + "`" + ` argument is true.
+ directive @skip(
+ # Skipped when true.
+ if: Boolean!
+ ) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
+
+ # Marks an element of a GraphQL schema as no longer supported.
+ directive @deprecated(
+ # Explains why this element was deprecated, usually also including a suggestion
+ # for how to access supported similar data. Formatted in
+ # [Markdown](https://daringfireball.net/projects/markdown/).
+ reason: String = "No longer supported"
+ ) on FIELD_DEFINITION | ENUM_VALUE
+
+ # A Directive provides a way to describe alternate runtime execution and type validation behavior in a GraphQL document.
+ #
+ # In some cases, you need to provide options to alter GraphQL's execution behavior
+ # in ways field arguments will not suffice, such as conditionally including or
+ # skipping a field. Directives provide this by describing additional information
+ # to the executor.
+ type __Directive {
+ name: String!
+ description: String
+ locations: [__DirectiveLocation!]!
+ args: [__InputValue!]!
+ }
+
+ # A Directive can be adjacent to many parts of the GraphQL language; a
+ # __DirectiveLocation describes one such possible adjacency.
+ enum __DirectiveLocation {
+ # Location adjacent to a query operation.
+ QUERY
+ # Location adjacent to a mutation operation.
+ MUTATION
+ # Location adjacent to a subscription operation.
+ SUBSCRIPTION
+ # Location adjacent to a field.
+ FIELD
+ # Location adjacent to a fragment definition.
+ FRAGMENT_DEFINITION
+ # Location adjacent to a fragment spread.
+ FRAGMENT_SPREAD
+ # Location adjacent to an inline fragment.
+ INLINE_FRAGMENT
+ # Location adjacent to a schema definition.
+ SCHEMA
+ # Location adjacent to a scalar definition.
+ SCALAR
+ # Location adjacent to an object type definition.
+ OBJECT
+ # Location adjacent to a field definition.
+ FIELD_DEFINITION
+ # Location adjacent to an argument definition.
+ ARGUMENT_DEFINITION
+ # Location adjacent to an interface definition.
+ INTERFACE
+ # Location adjacent to a union definition.
+ UNION
+ # Location adjacent to an enum definition.
+ ENUM
+ # Location adjacent to an enum value definition.
+ ENUM_VALUE
+ # Location adjacent to an input object type definition.
+ INPUT_OBJECT
+ # Location adjacent to an input object field definition.
+ INPUT_FIELD_DEFINITION
+ }
+
+ # One possible value for a given Enum. Enum values are unique values, not a
+ # placeholder for a string or numeric value. However an Enum value is returned in
+ # a JSON response as a string.
+ type __EnumValue {
+ name: String!
+ description: String
+ isDeprecated: Boolean!
+ deprecationReason: String
+ }
+
+ # Object and Interface types are described by a list of Fields, each of which has
+ # a name, potentially a list of arguments, and a return type.
+ type __Field {
+ name: String!
+ description: String
+ args: [__InputValue!]!
+ type: __Type!
+ isDeprecated: Boolean!
+ deprecationReason: String
+ }
+
+ # Arguments provided to Fields or Directives and the input fields of an
+ # InputObject are represented as Input Values which describe their type and
+ # optionally a default value.
+ type __InputValue {
+ name: String!
+ description: String
+ type: __Type!
+ # A GraphQL-formatted string representing the default value for this input value.
+ defaultValue: String
+ }
+
+ # A GraphQL Schema defines the capabilities of a GraphQL server. It exposes all
+ # available types and directives on the server, as well as the entry points for
+ # query, mutation, and subscription operations.
+ type __Schema {
+ # A list of all types supported by this server.
+ types: [__Type!]!
+ # The type that query operations will be rooted at.
+ queryType: __Type!
+ # If this server supports mutation, the type that mutation operations will be rooted at.
+ mutationType: __Type
+ # If this server supports subscription, the type that subscription operations will be rooted at.
+ subscriptionType: __Type
+ # A list of all directives supported by this server.
+ directives: [__Directive!]!
+ }
+
+ # The fundamental unit of any GraphQL Schema is the type. There are many kinds of
+ # types in GraphQL as represented by the ` + "`" + `__TypeKind` + "`" + ` enum.
+ #
+ # Depending on the kind of a type, certain fields describe information about that
+ # type. Scalar types provide no information beyond a name and description, while
+ # Enum types provide their values. Object and Interface types provide the fields
+ # they describe. Abstract types, Union and Interface, provide the Object types
+ # possible at runtime. List and NonNull types compose other types.
+ type __Type {
+ kind: __TypeKind!
+ name: String
+ description: String
+ fields(includeDeprecated: Boolean = false): [__Field!]
+ interfaces: [__Type!]
+ possibleTypes: [__Type!]
+ enumValues(includeDeprecated: Boolean = false): [__EnumValue!]
+ inputFields: [__InputValue!]
+ ofType: __Type
+ }
+
+ # An enum describing what kind of type a given ` + "`" + `__Type` + "`" + ` is.
+ enum __TypeKind {
+ # Indicates this type is a scalar.
+ SCALAR
+ # Indicates this type is an object. ` + "`" + `fields` + "`" + ` and ` + "`" + `interfaces` + "`" + ` are valid fields.
+ OBJECT
+ # Indicates this type is an interface. ` + "`" + `fields` + "`" + ` and ` + "`" + `possibleTypes` + "`" + ` are valid fields.
+ INTERFACE
+ # Indicates this type is a union. ` + "`" + `possibleTypes` + "`" + ` is a valid field.
+ UNION
+ # Indicates this type is an enum. ` + "`" + `enumValues` + "`" + ` is a valid field.
+ ENUM
+ # Indicates this type is an input object. ` + "`" + `inputFields` + "`" + ` is a valid field.
+ INPUT_OBJECT
+ # Indicates this type is a list. ` + "`" + `ofType` + "`" + ` is a valid field.
+ LIST
+ # Indicates this type is a non-null. ` + "`" + `ofType` + "`" + ` is a valid field.
+ NON_NULL
+ }
+`
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/schema/schema.go b/vendor/github.com/graph-gophers/graphql-go/internal/schema/schema.go
new file mode 100644
index 00000000..fb301c46
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/schema/schema.go
@@ -0,0 +1,586 @@
+package schema
+
+import (
+ "fmt"
+ "text/scanner"
+
+ "github.com/graph-gophers/graphql-go/errors"
+ "github.com/graph-gophers/graphql-go/internal/common"
+ "github.com/graph-gophers/graphql-go/types"
+)
+
+// New initializes an instance of Schema.
+func New() *types.Schema {
+ s := &types.Schema{
+ EntryPointNames: make(map[string]string),
+ Types: make(map[string]types.NamedType),
+ Directives: make(map[string]*types.DirectiveDefinition),
+ }
+ m := newMeta()
+ for n, t := range m.Types {
+ s.Types[n] = t
+ }
+ for n, d := range m.Directives {
+ s.Directives[n] = d
+ }
+ return s
+}
+
+func Parse(s *types.Schema, schemaString string, useStringDescriptions bool) error {
+ l := common.NewLexer(schemaString, useStringDescriptions)
+ err := l.CatchSyntaxError(func() { parseSchema(s, l) })
+ if err != nil {
+ return err
+ }
+
+ if err := mergeExtensions(s); err != nil {
+ return err
+ }
+
+ for _, t := range s.Types {
+ if err := resolveNamedType(s, t); err != nil {
+ return err
+ }
+ }
+ for _, d := range s.Directives {
+ for _, arg := range d.Arguments {
+ t, err := common.ResolveType(arg.Type, s.Resolve)
+ if err != nil {
+ return err
+ }
+ arg.Type = t
+ }
+ }
+
+ // https://graphql.github.io/graphql-spec/June2018/#sec-Root-Operation-Types
+ // > While any type can be the root operation type for a GraphQL operation, the type system definition language can
+ // > omit the schema definition when the query, mutation, and subscription root types are named Query, Mutation,
+ // > and Subscription respectively.
+ if len(s.EntryPointNames) == 0 {
+ if _, ok := s.Types["Query"]; ok {
+ s.EntryPointNames["query"] = "Query"
+ }
+ if _, ok := s.Types["Mutation"]; ok {
+ s.EntryPointNames["mutation"] = "Mutation"
+ }
+ if _, ok := s.Types["Subscription"]; ok {
+ s.EntryPointNames["subscription"] = "Subscription"
+ }
+ }
+ s.EntryPoints = make(map[string]types.NamedType)
+ for key, name := range s.EntryPointNames {
+ t, ok := s.Types[name]
+ if !ok {
+ return errors.Errorf("type %q not found", name)
+ }
+ s.EntryPoints[key] = t
+ }
+
+ // Interface types need validation: https://spec.graphql.org/draft/#sec-Interfaces.Interfaces-Implementing-Interfaces
+ for _, typeDef := range s.Types {
+ switch t := typeDef.(type) {
+ case *types.InterfaceTypeDefinition:
+ for i, implements := range t.Interfaces {
+ typ, ok := s.Types[implements.Name]
+ if !ok {
+ return errors.Errorf("interface %q not found", implements)
+ }
+ iface, ok := typ.(*types.InterfaceTypeDefinition)
+ if !ok {
+ return errors.Errorf("type %q is not an interface", implements.Name)
+ }
+
+ for _, f := range iface.Fields.Names() {
+ if t.Fields.Get(f) == nil {
+ return errors.Errorf("interface %q expects field %q but %q does not provide it", iface.Name, f, t.Name)
+ }
+ }
+
+ t.Interfaces[i] = iface
+ }
+ default:
+ continue
+ }
+ }
+
+ for _, obj := range s.Objects {
+ obj.Interfaces = make([]*types.InterfaceTypeDefinition, len(obj.InterfaceNames))
+ if err := resolveDirectives(s, obj.Directives, "OBJECT"); err != nil {
+ return err
+ }
+ for _, field := range obj.Fields {
+ if err := resolveDirectives(s, field.Directives, "FIELD_DEFINITION"); err != nil {
+ return err
+ }
+ }
+ for i, intfName := range obj.InterfaceNames {
+ t, ok := s.Types[intfName]
+ if !ok {
+ return errors.Errorf("interface %q not found", intfName)
+ }
+ intf, ok := t.(*types.InterfaceTypeDefinition)
+ if !ok {
+ return errors.Errorf("type %q is not an interface", intfName)
+ }
+ for _, f := range intf.Fields.Names() {
+ if obj.Fields.Get(f) == nil {
+ return errors.Errorf("interface %q expects field %q but %q does not provide it", intfName, f, obj.Name)
+ }
+ }
+ obj.Interfaces[i] = intf
+ intf.PossibleTypes = append(intf.PossibleTypes, obj)
+ }
+ }
+
+ for _, union := range s.Unions {
+ if err := resolveDirectives(s, union.Directives, "UNION"); err != nil {
+ return err
+ }
+ union.UnionMemberTypes = make([]*types.ObjectTypeDefinition, len(union.TypeNames))
+ for i, name := range union.TypeNames {
+ t, ok := s.Types[name]
+ if !ok {
+ return errors.Errorf("object type %q not found", name)
+ }
+ obj, ok := t.(*types.ObjectTypeDefinition)
+ if !ok {
+ return errors.Errorf("type %q is not an object", name)
+ }
+ union.UnionMemberTypes[i] = obj
+ }
+ }
+
+ for _, enum := range s.Enums {
+ if err := resolveDirectives(s, enum.Directives, "ENUM"); err != nil {
+ return err
+ }
+ for _, value := range enum.EnumValuesDefinition {
+ if err := resolveDirectives(s, value.Directives, "ENUM_VALUE"); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
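+// ParseSchema is a convenience wrapper around New and Parse: it creates a fresh
+// schema, parses schemaString into it and returns the result.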
+func ParseSchema(schemaString string, useStringDescriptions bool) (*types.Schema, error) {
+ s := New()
+ err := Parse(s, schemaString, useStringDescriptions)
+ return s, err
+}
+
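+// mergeExtensions folds every parsed type extension into the type it extends,
+// rejecting extensions of unknown types, kind mismatches and duplicate fields,
+// interfaces, union members or enum values.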
+func mergeExtensions(s *types.Schema) error {
+ for _, ext := range s.Extensions {
+ typ := s.Types[ext.Type.TypeName()]
+ if typ == nil {
+ return fmt.Errorf("trying to extend unknown type %q", ext.Type.TypeName())
+ }
+
+ if typ.Kind() != ext.Type.Kind() {
+			return fmt.Errorf("trying to extend type %q of kind %q with a definition of kind %q", ext.Type.TypeName(), typ.Kind(), ext.Type.Kind())
+ }
+
+ switch og := typ.(type) {
+ case *types.ObjectTypeDefinition:
+ e := ext.Type.(*types.ObjectTypeDefinition)
+
+ for _, field := range e.Fields {
+ if og.Fields.Get(field.Name) != nil {
+ return fmt.Errorf("extended field %q already exists", field.Name)
+ }
+ }
+ og.Fields = append(og.Fields, e.Fields...)
+
+ for _, en := range e.InterfaceNames {
+ for _, on := range og.InterfaceNames {
+ if on == en {
+ return fmt.Errorf("interface %q implemented in the extension is already implemented in %q", on, og.Name)
+ }
+ }
+ }
+ og.InterfaceNames = append(og.InterfaceNames, e.InterfaceNames...)
+
+ case *types.InputObject:
+ e := ext.Type.(*types.InputObject)
+
+ for _, field := range e.Values {
+ if og.Values.Get(field.Name.Name) != nil {
+					return fmt.Errorf("extended field %q already exists", field.Name.Name)
+ }
+ }
+ og.Values = append(og.Values, e.Values...)
+
+ case *types.InterfaceTypeDefinition:
+ e := ext.Type.(*types.InterfaceTypeDefinition)
+
+ for _, field := range e.Fields {
+ if og.Fields.Get(field.Name) != nil {
+					return fmt.Errorf("extended field %q already exists", field.Name)
+ }
+ }
+ og.Fields = append(og.Fields, e.Fields...)
+
+ case *types.Union:
+ e := ext.Type.(*types.Union)
+
+ for _, en := range e.TypeNames {
+ for _, on := range og.TypeNames {
+ if on == en {
+ return fmt.Errorf("union type %q already declared in %q", on, og.Name)
+ }
+ }
+ }
+ og.TypeNames = append(og.TypeNames, e.TypeNames...)
+
+ case *types.EnumTypeDefinition:
+ e := ext.Type.(*types.EnumTypeDefinition)
+
+ for _, en := range e.EnumValuesDefinition {
+ for _, on := range og.EnumValuesDefinition {
+ if on.EnumValue == en.EnumValue {
+ return fmt.Errorf("enum value %q already declared in %q", on.EnumValue, og.Name)
+ }
+ }
+ }
+ og.EnumValuesDefinition = append(og.EnumValuesDefinition, e.EnumValuesDefinition...)
+ default:
+ return fmt.Errorf(`unexpected %q, expecting "schema", "type", "enum", "interface", "union" or "input"`, og.TypeName())
+ }
+ }
+
+ return nil
+}
+
+func resolveNamedType(s *types.Schema, t types.NamedType) error {
+ switch t := t.(type) {
+ case *types.ObjectTypeDefinition:
+ for _, f := range t.Fields {
+ if err := resolveField(s, f); err != nil {
+ return err
+ }
+ }
+ case *types.InterfaceTypeDefinition:
+ for _, f := range t.Fields {
+ if err := resolveField(s, f); err != nil {
+ return err
+ }
+ }
+ case *types.InputObject:
+ if err := resolveInputObject(s, t.Values); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func resolveField(s *types.Schema, f *types.FieldDefinition) error {
+ t, err := common.ResolveType(f.Type, s.Resolve)
+ if err != nil {
+ return err
+ }
+ f.Type = t
+ if err := resolveDirectives(s, f.Directives, "FIELD_DEFINITION"); err != nil {
+ return err
+ }
+ return resolveInputObject(s, f.Arguments)
+}
+
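+// resolveDirectives verifies that each applied directive is defined, allowed at
+// the given location and only uses declared arguments, and fills in default
+// values for any arguments that were omitted.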
+func resolveDirectives(s *types.Schema, directives types.DirectiveList, loc string) error {
+ for _, d := range directives {
+ dirName := d.Name.Name
+ dd, ok := s.Directives[dirName]
+ if !ok {
+ return errors.Errorf("directive %q not found", dirName)
+ }
+ validLoc := false
+ for _, l := range dd.Locations {
+ if l == loc {
+ validLoc = true
+ break
+ }
+ }
+ if !validLoc {
+ return errors.Errorf("invalid location %q for directive %q (must be one of %v)", loc, dirName, dd.Locations)
+ }
+ for _, arg := range d.Arguments {
+ if dd.Arguments.Get(arg.Name.Name) == nil {
+ return errors.Errorf("invalid argument %q for directive %q", arg.Name.Name, dirName)
+ }
+ }
+ for _, arg := range dd.Arguments {
+ if _, ok := d.Arguments.Get(arg.Name.Name); !ok {
+ d.Arguments = append(d.Arguments, &types.Argument{Name: arg.Name, Value: arg.Default})
+ }
+ }
+ }
+ return nil
+}
+
+func resolveInputObject(s *types.Schema, values types.ArgumentsDefinition) error {
+ for _, v := range values {
+ t, err := common.ResolveType(v.Type, s.Resolve)
+ if err != nil {
+ return err
+ }
+ v.Type = t
+ }
+ return nil
+}
+
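+// parseSchema consumes top-level definitions (schema, type, interface, union,
+// enum, input, scalar, directive, extend) from the lexer and records them on s.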
+func parseSchema(s *types.Schema, l *common.Lexer) {
+ l.ConsumeWhitespace()
+
+ for l.Peek() != scanner.EOF {
+ desc := l.DescComment()
+ switch x := l.ConsumeIdent(); x {
+
+ case "schema":
+ l.ConsumeToken('{')
+			for l.Peek() != '}' {
+				name := l.ConsumeIdent()
+ l.ConsumeToken(':')
+ typ := l.ConsumeIdent()
+ s.EntryPointNames[name] = typ
+ }
+ l.ConsumeToken('}')
+
+ case "type":
+ obj := parseObjectDef(l)
+ obj.Desc = desc
+ s.Types[obj.Name] = obj
+ s.Objects = append(s.Objects, obj)
+
+ case "interface":
+ iface := parseInterfaceDef(l)
+ iface.Desc = desc
+ s.Types[iface.Name] = iface
+
+ case "union":
+ union := parseUnionDef(l)
+ union.Desc = desc
+ s.Types[union.Name] = union
+ s.Unions = append(s.Unions, union)
+
+ case "enum":
+ enum := parseEnumDef(l)
+ enum.Desc = desc
+ s.Types[enum.Name] = enum
+ s.Enums = append(s.Enums, enum)
+
+ case "input":
+ input := parseInputDef(l)
+ input.Desc = desc
+ s.Types[input.Name] = input
+
+ case "scalar":
+ loc := l.Location()
+ name := l.ConsumeIdent()
+ directives := common.ParseDirectives(l)
+ s.Types[name] = &types.ScalarTypeDefinition{Name: name, Desc: desc, Directives: directives, Loc: loc}
+
+ case "directive":
+ directive := parseDirectiveDef(l)
+ directive.Desc = desc
+ s.Directives[directive.Name] = directive
+
+ case "extend":
+ parseExtension(s, l)
+
+ default:
+ // TODO: Add support for type extensions.
+ l.SyntaxError(fmt.Sprintf(`unexpected %q, expecting "schema", "type", "enum", "interface", "union", "input", "scalar" or "directive"`, x))
+ }
+ }
+}
+
+func parseObjectDef(l *common.Lexer) *types.ObjectTypeDefinition {
+ object := &types.ObjectTypeDefinition{Loc: l.Location(), Name: l.ConsumeIdent()}
+
+ for {
+ if l.Peek() == '{' {
+ break
+ }
+
+ if l.Peek() == '@' {
+ object.Directives = common.ParseDirectives(l)
+ continue
+ }
+
+ if l.Peek() == scanner.Ident {
+ l.ConsumeKeyword("implements")
+
+ for l.Peek() != '{' && l.Peek() != '@' {
+ if l.Peek() == '&' {
+ l.ConsumeToken('&')
+ }
+
+ object.InterfaceNames = append(object.InterfaceNames, l.ConsumeIdent())
+ }
+ continue
+		}
+	}
+
+	l.ConsumeToken('{')
+	object.Fields = parseFieldsDef(l)
+	l.ConsumeToken('}')
+
+	return object
+}
+
+func parseInterfaceDef(l *common.Lexer) *types.InterfaceTypeDefinition {
+ i := &types.InterfaceTypeDefinition{Loc: l.Location(), Name: l.ConsumeIdent()}
+
+ if l.Peek() == scanner.Ident {
+ l.ConsumeKeyword("implements")
+ i.Interfaces = append(i.Interfaces, &types.InterfaceTypeDefinition{Name: l.ConsumeIdent()})
+
+ for l.Peek() == '&' {
+ l.ConsumeToken('&')
+ i.Interfaces = append(i.Interfaces, &types.InterfaceTypeDefinition{Name: l.ConsumeIdent()})
+ }
+ }
+
+ i.Directives = common.ParseDirectives(l)
+
+ l.ConsumeToken('{')
+ i.Fields = parseFieldsDef(l)
+ l.ConsumeToken('}')
+
+ return i
+}
+
+func parseUnionDef(l *common.Lexer) *types.Union {
+ union := &types.Union{Loc: l.Location(), Name: l.ConsumeIdent()}
+
+ union.Directives = common.ParseDirectives(l)
+ l.ConsumeToken('=')
+ union.TypeNames = []string{l.ConsumeIdent()}
+ for l.Peek() == '|' {
+ l.ConsumeToken('|')
+ union.TypeNames = append(union.TypeNames, l.ConsumeIdent())
+ }
+
+ return union
+}
+
+func parseInputDef(l *common.Lexer) *types.InputObject {
+ i := &types.InputObject{}
+ i.Loc = l.Location()
+ i.Name = l.ConsumeIdent()
+ i.Directives = common.ParseDirectives(l)
+ l.ConsumeToken('{')
+ for l.Peek() != '}' {
+ i.Values = append(i.Values, common.ParseInputValue(l))
+ }
+ l.ConsumeToken('}')
+ return i
+}
+
+func parseEnumDef(l *common.Lexer) *types.EnumTypeDefinition {
+ enum := &types.EnumTypeDefinition{Loc: l.Location(), Name: l.ConsumeIdent()}
+
+ enum.Directives = common.ParseDirectives(l)
+ l.ConsumeToken('{')
+ for l.Peek() != '}' {
+ v := &types.EnumValueDefinition{
+ Desc: l.DescComment(),
+ Loc: l.Location(),
+ EnumValue: l.ConsumeIdent(),
+ Directives: common.ParseDirectives(l),
+ }
+
+ enum.EnumValuesDefinition = append(enum.EnumValuesDefinition, v)
+ }
+ l.ConsumeToken('}')
+ return enum
+}
+
+func parseDirectiveDef(l *common.Lexer) *types.DirectiveDefinition {
+ l.ConsumeToken('@')
+ loc := l.Location()
+ d := &types.DirectiveDefinition{Name: l.ConsumeIdent(), Loc: loc}
+
+ if l.Peek() == '(' {
+ l.ConsumeToken('(')
+ for l.Peek() != ')' {
+ v := common.ParseInputValue(l)
+ d.Arguments = append(d.Arguments, v)
+ }
+ l.ConsumeToken(')')
+ }
+
+ l.ConsumeKeyword("on")
+
+ for {
+ loc := l.ConsumeIdent()
+ d.Locations = append(d.Locations, loc)
+ if l.Peek() != '|' {
+ break
+ }
+ l.ConsumeToken('|')
+ }
+ return d
+}
+
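+// parseExtension parses an "extend" definition: schema extensions are applied
+// to the entry points immediately, while type extensions are collected on
+// s.Extensions and merged later by mergeExtensions.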
+func parseExtension(s *types.Schema, l *common.Lexer) {
+ loc := l.Location()
+ switch x := l.ConsumeIdent(); x {
+ case "schema":
+ l.ConsumeToken('{')
+ for l.Peek() != '}' {
+ name := l.ConsumeIdent()
+ l.ConsumeToken(':')
+ typ := l.ConsumeIdent()
+ s.EntryPointNames[name] = typ
+ }
+ l.ConsumeToken('}')
+
+ case "type":
+ obj := parseObjectDef(l)
+ s.Extensions = append(s.Extensions, &types.Extension{Type: obj, Loc: loc})
+
+ case "interface":
+ iface := parseInterfaceDef(l)
+ s.Extensions = append(s.Extensions, &types.Extension{Type: iface, Loc: loc})
+
+ case "union":
+ union := parseUnionDef(l)
+ s.Extensions = append(s.Extensions, &types.Extension{Type: union, Loc: loc})
+
+ case "enum":
+ enum := parseEnumDef(l)
+ s.Extensions = append(s.Extensions, &types.Extension{Type: enum, Loc: loc})
+
+ case "input":
+ input := parseInputDef(l)
+ s.Extensions = append(s.Extensions, &types.Extension{Type: input, Loc: loc})
+
+ default:
+ // TODO: Add ScalarTypeDefinition when adding directives
+ l.SyntaxError(fmt.Sprintf(`unexpected %q, expecting "schema", "type", "enum", "interface", "union" or "input"`, x))
+ }
+}
+
+func parseFieldsDef(l *common.Lexer) types.FieldsDefinition {
+ var fields types.FieldsDefinition
+ for l.Peek() != '}' {
+ f := &types.FieldDefinition{}
+ f.Desc = l.DescComment()
+ f.Loc = l.Location()
+ f.Name = l.ConsumeIdent()
+ if l.Peek() == '(' {
+ l.ConsumeToken('(')
+ for l.Peek() != ')' {
+ f.Arguments = append(f.Arguments, common.ParseInputValue(l))
+ }
+ l.ConsumeToken(')')
+ }
+ l.ConsumeToken(':')
+ f.Type = common.ParseType(l)
+ f.Directives = common.ParseDirectives(l)
+ fields = append(fields, f)
+ }
+ return fields
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/validation/suggestion.go b/vendor/github.com/graph-gophers/graphql-go/internal/validation/suggestion.go
new file mode 100644
index 00000000..9702b5f5
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/validation/suggestion.go
@@ -0,0 +1,71 @@
+package validation
+
+import (
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+)
+
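+// makeSuggestion builds a hint such as ` Did you mean "name"?` from the options
+// whose Levenshtein distance to input is below a length-based threshold, sorted
+// by closeness. It returns an empty string if nothing is close enough.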
+func makeSuggestion(prefix string, options []string, input string) string {
+ var selected []string
+ distances := make(map[string]int)
+ for _, opt := range options {
+ distance := levenshteinDistance(input, opt)
+ threshold := max(len(input)/2, max(len(opt)/2, 1))
+ if distance < threshold {
+ selected = append(selected, opt)
+ distances[opt] = distance
+ }
+ }
+
+ if len(selected) == 0 {
+ return ""
+ }
+ sort.Slice(selected, func(i, j int) bool {
+ return distances[selected[i]] < distances[selected[j]]
+ })
+
+ parts := make([]string, len(selected))
+ for i, opt := range selected {
+ parts[i] = strconv.Quote(opt)
+ }
+ if len(parts) > 1 {
+ parts[len(parts)-1] = "or " + parts[len(parts)-1]
+ }
+ return fmt.Sprintf(" %s %s?", prefix, strings.Join(parts, ", "))
+}
+
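+// levenshteinDistance returns the edit distance between s1 and s2, computed
+// with a single reusable column of the dynamic-programming matrix.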
+func levenshteinDistance(s1, s2 string) int {
+ column := make([]int, len(s1)+1)
+ for y := range s1 {
+ column[y+1] = y + 1
+ }
+ for x, rx := range s2 {
+ column[0] = x + 1
+ lastdiag := x
+ for y, ry := range s1 {
+ olddiag := column[y+1]
+ if rx != ry {
+ lastdiag++
+ }
+ column[y+1] = min(column[y+1]+1, min(column[y]+1, lastdiag))
+ lastdiag = olddiag
+ }
+ }
+ return column[len(s1)]
+}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/validation/validation.go b/vendor/github.com/graph-gophers/graphql-go/internal/validation/validation.go
new file mode 100644
index 00000000..e3672638
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/validation/validation.go
@@ -0,0 +1,980 @@
+package validation
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "text/scanner"
+
+ "github.com/graph-gophers/graphql-go/errors"
+ "github.com/graph-gophers/graphql-go/internal/common"
+ "github.com/graph-gophers/graphql-go/internal/query"
+ "github.com/graph-gophers/graphql-go/types"
+)
+
+type varSet map[*types.InputValueDefinition]struct{}
+
+type selectionPair struct{ a, b types.Selection }
+
+type nameSet map[string]errors.Location
+
+type fieldInfo struct {
+ sf *types.FieldDefinition
+ parent types.NamedType
+}
+
+type context struct {
+ schema *types.Schema
+ doc *types.ExecutableDefinition
+ errs []*errors.QueryError
+ opErrs map[*types.OperationDefinition][]*errors.QueryError
+ usedVars map[*types.OperationDefinition]varSet
+ fieldMap map[*types.Field]fieldInfo
+ overlapValidated map[selectionPair]struct{}
+ maxDepth int
+}
+
+func (c *context) addErr(loc errors.Location, rule string, format string, a ...interface{}) {
+ c.addErrMultiLoc([]errors.Location{loc}, rule, format, a...)
+}
+
+func (c *context) addErrMultiLoc(locs []errors.Location, rule string, format string, a ...interface{}) {
+ c.errs = append(c.errs, &errors.QueryError{
+ Message: fmt.Sprintf(format, a...),
+ Locations: locs,
+ Rule: rule,
+ })
+}
+
+type opContext struct {
+ *context
+ ops []*types.OperationDefinition
+}
+
+func newContext(s *types.Schema, doc *types.ExecutableDefinition, maxDepth int) *context {
+ return &context{
+ schema: s,
+ doc: doc,
+ opErrs: make(map[*types.OperationDefinition][]*errors.QueryError),
+ usedVars: make(map[*types.OperationDefinition]varSet),
+ fieldMap: make(map[*types.Field]fieldInfo),
+ overlapValidated: make(map[selectionPair]struct{}),
+ maxDepth: maxDepth,
+ }
+}
+
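+// Validate applies the GraphQL validation rules to the executable document doc
+// against schema s: operation, fragment and variable checks, directive and
+// argument checks and, if maxDepth is non-zero, query depth limiting. It
+// returns all collected query errors.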
+func Validate(s *types.Schema, doc *types.ExecutableDefinition, variables map[string]interface{}, maxDepth int) []*errors.QueryError {
+ c := newContext(s, doc, maxDepth)
+
+ opNames := make(nameSet)
+ fragUsedBy := make(map[*types.FragmentDefinition][]*types.OperationDefinition)
+ for _, op := range doc.Operations {
+ c.usedVars[op] = make(varSet)
+ opc := &opContext{c, []*types.OperationDefinition{op}}
+
+ // Check if max depth is exceeded, if it's set. If max depth is exceeded,
+ // don't continue to validate the document and exit early.
+ if validateMaxDepth(opc, op.Selections, nil, 1) {
+ return c.errs
+ }
+
+ if op.Name.Name == "" && len(doc.Operations) != 1 {
+ c.addErr(op.Loc, "LoneAnonymousOperation", "This anonymous operation must be the only defined operation.")
+ }
+ if op.Name.Name != "" {
+ validateName(c, opNames, op.Name, "UniqueOperationNames", "operation")
+ }
+
+ validateDirectives(opc, string(op.Type), op.Directives)
+
+ varNames := make(nameSet)
+ for _, v := range op.Vars {
+ validateName(c, varNames, v.Name, "UniqueVariableNames", "variable")
+
+ t := resolveType(c, v.Type)
+ if !canBeInput(t) {
+ c.addErr(v.TypeLoc, "VariablesAreInputTypes", "Variable %q cannot be non-input type %q.", "$"+v.Name.Name, t)
+ }
+ validateValue(opc, v, variables[v.Name.Name], t)
+
+ if v.Default != nil {
+ validateLiteral(opc, v.Default)
+
+ if t != nil {
+ if nn, ok := t.(*types.NonNull); ok {
+ c.addErr(v.Default.Location(), "DefaultValuesOfCorrectType", "Variable %q of type %q is required and will not use the default value. Perhaps you meant to use type %q.", "$"+v.Name.Name, t, nn.OfType)
+ }
+
+ if ok, reason := validateValueType(opc, v.Default, t); !ok {
+ c.addErr(v.Default.Location(), "DefaultValuesOfCorrectType", "Variable %q of type %q has invalid default value %s.\n%s", "$"+v.Name.Name, t, v.Default, reason)
+ }
+ }
+ }
+ }
+
+ var entryPoint types.NamedType
+ switch op.Type {
+ case query.Query:
+ entryPoint = s.EntryPoints["query"]
+ case query.Mutation:
+ entryPoint = s.EntryPoints["mutation"]
+ case query.Subscription:
+ entryPoint = s.EntryPoints["subscription"]
+ default:
+ panic("unreachable")
+ }
+
+ validateSelectionSet(opc, op.Selections, entryPoint)
+
+ fragUsed := make(map[*types.FragmentDefinition]struct{})
+ markUsedFragments(c, op.Selections, fragUsed)
+ for frag := range fragUsed {
+ fragUsedBy[frag] = append(fragUsedBy[frag], op)
+ }
+ }
+
+ fragNames := make(nameSet)
+ fragVisited := make(map[*types.FragmentDefinition]struct{})
+ for _, frag := range doc.Fragments {
+ opc := &opContext{c, fragUsedBy[frag]}
+
+ validateName(c, fragNames, frag.Name, "UniqueFragmentNames", "fragment")
+ validateDirectives(opc, "FRAGMENT_DEFINITION", frag.Directives)
+
+ t := unwrapType(resolveType(c, &frag.On))
+ // continue even if t is nil
+ if t != nil && !canBeFragment(t) {
+ c.addErr(frag.On.Loc, "FragmentsOnCompositeTypes", "Fragment %q cannot condition on non composite type %q.", frag.Name.Name, t)
+ continue
+ }
+
+ validateSelectionSet(opc, frag.Selections, t)
+
+ if _, ok := fragVisited[frag]; !ok {
+ detectFragmentCycle(c, frag.Selections, fragVisited, nil, map[string]int{frag.Name.Name: 0})
+ }
+ }
+
+ for _, frag := range doc.Fragments {
+ if len(fragUsedBy[frag]) == 0 {
+ c.addErr(frag.Loc, "NoUnusedFragments", "Fragment %q is never used.", frag.Name.Name)
+ }
+ }
+
+ for _, op := range doc.Operations {
+ c.errs = append(c.errs, c.opErrs[op]...)
+
+ opUsedVars := c.usedVars[op]
+ for _, v := range op.Vars {
+ if _, ok := opUsedVars[v]; !ok {
+ opSuffix := ""
+ if op.Name.Name != "" {
+ opSuffix = fmt.Sprintf(" in operation %q", op.Name.Name)
+ }
+ c.addErr(v.Loc, "NoUnusedVariables", "Variable %q is never used%s.", "$"+v.Name.Name, opSuffix)
+ }
+ }
+ }
+
+ return c.errs
+}
+
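+// validateValue checks a variable value supplied at execution time against its
+// declared input type, recursing through non-null, list and input object types
+// and reporting null, enum and type mismatches.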
+func validateValue(c *opContext, v *types.InputValueDefinition, val interface{}, t types.Type) {
+ switch t := t.(type) {
+ case *types.NonNull:
+ if val == nil {
+ c.addErr(v.Loc, "VariablesOfCorrectType", "Variable \"%s\" has invalid value null.\nExpected type \"%s\", found null.", v.Name.Name, t)
+ return
+ }
+ validateValue(c, v, val, t.OfType)
+ case *types.List:
+ if val == nil {
+ return
+ }
+ vv, ok := val.([]interface{})
+ if !ok {
+ // Input coercion rules allow single items without wrapping array
+ validateValue(c, v, val, t.OfType)
+ return
+ }
+ for _, elem := range vv {
+ validateValue(c, v, elem, t.OfType)
+ }
+ case *types.EnumTypeDefinition:
+ if val == nil {
+ return
+ }
+ e, ok := val.(string)
+ if !ok {
+ c.addErr(v.Loc, "VariablesOfCorrectType", "Variable \"%s\" has invalid type %T.\nExpected type \"%s\", found %v.", v.Name.Name, val, t, val)
+ return
+ }
+ for _, option := range t.EnumValuesDefinition {
+ if option.EnumValue == e {
+ return
+ }
+ }
+ c.addErr(v.Loc, "VariablesOfCorrectType", "Variable \"%s\" has invalid value %s.\nExpected type \"%s\", found %s.", v.Name.Name, e, t, e)
+ case *types.InputObject:
+ if val == nil {
+ return
+ }
+ in, ok := val.(map[string]interface{})
+ if !ok {
+ c.addErr(v.Loc, "VariablesOfCorrectType", "Variable \"%s\" has invalid type %T.\nExpected type \"%s\", found %s.", v.Name.Name, val, t, val)
+ return
+ }
+ for _, f := range t.Values {
+ fieldVal := in[f.Name.Name]
+ validateValue(c, f, fieldVal, f.Type)
+ }
+ }
+}
+
+// validateMaxDepth checks that the selections do not go deeper than maxDepth (if set).
+// It returns true when the max depth is exceeded so the caller can stop validating
+// early instead of recursing further.
+//
+// The visited map is necessary to ensure that max depth validation does not get stuck in cyclical
+// fragment spreads.
+func validateMaxDepth(c *opContext, sels []types.Selection, visited map[*types.FragmentDefinition]struct{}, depth int) bool {
+ // maxDepth checking is turned off when maxDepth is 0
+ if c.maxDepth == 0 {
+ return false
+ }
+
+ exceededMaxDepth := false
+ if visited == nil {
+ visited = map[*types.FragmentDefinition]struct{}{}
+ }
+
+ for _, sel := range sels {
+ switch sel := sel.(type) {
+ case *types.Field:
+ if depth > c.maxDepth {
+ exceededMaxDepth = true
+ c.addErr(sel.Alias.Loc, "MaxDepthExceeded", "Field %q has depth %d that exceeds max depth %d", sel.Name.Name, depth, c.maxDepth)
+ continue
+ }
+ exceededMaxDepth = exceededMaxDepth || validateMaxDepth(c, sel.SelectionSet, visited, depth+1)
+
+ case *types.InlineFragment:
+ // Depth is not checked because inline fragments resolve to other fields which are checked.
+ // Depth is not incremented because inline fragments have the same depth as neighboring fields
+ exceededMaxDepth = exceededMaxDepth || validateMaxDepth(c, sel.Selections, visited, depth)
+ case *types.FragmentSpread:
+ // Depth is not checked because fragments resolve to other fields which are checked.
+ frag := c.doc.Fragments.Get(sel.Name.Name)
+ if frag == nil {
+ // In case of unknown fragment (invalid request), ignore max depth evaluation
+ c.addErr(sel.Loc, "MaxDepthEvaluationError", "Unknown fragment %q. Unable to evaluate depth.", sel.Name.Name)
+ continue
+ }
+
+ if _, ok := visited[frag]; ok {
+ // we've already seen this fragment, don't check depth again.
+ continue
+ }
+ visited[frag] = struct{}{}
+
+ // Depth is not incremented because fragments have the same depth as surrounding fields
+ exceededMaxDepth = exceededMaxDepth || validateMaxDepth(c, frag.Selections, visited, depth)
+ }
+ }
+
+ return exceededMaxDepth
+}
+
+func validateSelectionSet(c *opContext, sels []types.Selection, t types.NamedType) {
+ for _, sel := range sels {
+ validateSelection(c, sel, t)
+ }
+
+ for i, a := range sels {
+ for _, b := range sels[i+1:] {
+ c.validateOverlap(a, b, nil, nil)
+ }
+ }
+}
+
+func validateSelection(c *opContext, sel types.Selection, t types.NamedType) {
+ switch sel := sel.(type) {
+ case *types.Field:
+ validateDirectives(c, "FIELD", sel.Directives)
+
+ fieldName := sel.Name.Name
+ var f *types.FieldDefinition
+ switch fieldName {
+ case "__typename":
+ f = &types.FieldDefinition{
+ Name: "__typename",
+ Type: c.schema.Types["String"],
+ }
+ case "__schema":
+ f = &types.FieldDefinition{
+ Name: "__schema",
+ Type: c.schema.Types["__Schema"],
+ }
+ case "__type":
+ f = &types.FieldDefinition{
+ Name: "__type",
+ Arguments: types.ArgumentsDefinition{
+ &types.InputValueDefinition{
+ Name: types.Ident{Name: "name"},
+ Type: &types.NonNull{OfType: c.schema.Types["String"]},
+ },
+ },
+ Type: c.schema.Types["__Type"],
+ }
+ default:
+ f = fields(t).Get(fieldName)
+ if f == nil && t != nil {
+ suggestion := makeSuggestion("Did you mean", fields(t).Names(), fieldName)
+ c.addErr(sel.Alias.Loc, "FieldsOnCorrectType", "Cannot query field %q on type %q.%s", fieldName, t, suggestion)
+ }
+ }
+ c.fieldMap[sel] = fieldInfo{sf: f, parent: t}
+
+ validateArgumentLiterals(c, sel.Arguments)
+ if f != nil {
+ validateArgumentTypes(c, sel.Arguments, f.Arguments, sel.Alias.Loc,
+ func() string { return fmt.Sprintf("field %q of type %q", fieldName, t) },
+ func() string { return fmt.Sprintf("Field %q", fieldName) },
+ )
+ }
+
+ var ft types.Type
+ if f != nil {
+ ft = f.Type
+ sf := hasSubfields(ft)
+ if sf && sel.SelectionSet == nil {
+ c.addErr(sel.Alias.Loc, "ScalarLeafs", "Field %q of type %q must have a selection of subfields. Did you mean \"%s { ... }\"?", fieldName, ft, fieldName)
+ }
+ if !sf && sel.SelectionSet != nil {
+ c.addErr(sel.SelectionSetLoc, "ScalarLeafs", "Field %q must not have a selection since type %q has no subfields.", fieldName, ft)
+ }
+ }
+ if sel.SelectionSet != nil {
+ validateSelectionSet(c, sel.SelectionSet, unwrapType(ft))
+ }
+
+ case *types.InlineFragment:
+ validateDirectives(c, "INLINE_FRAGMENT", sel.Directives)
+ if sel.On.Name != "" {
+ fragTyp := unwrapType(resolveType(c.context, &sel.On))
+ if fragTyp != nil && !compatible(t, fragTyp) {
+ c.addErr(sel.Loc, "PossibleFragmentSpreads", "Fragment cannot be spread here as objects of type %q can never be of type %q.", t, fragTyp)
+ }
+ t = fragTyp
+ // continue even if t is nil
+ }
+ if t != nil && !canBeFragment(t) {
+ c.addErr(sel.On.Loc, "FragmentsOnCompositeTypes", "Fragment cannot condition on non composite type %q.", t)
+ return
+ }
+ validateSelectionSet(c, sel.Selections, unwrapType(t))
+
+ case *types.FragmentSpread:
+ validateDirectives(c, "FRAGMENT_SPREAD", sel.Directives)
+ frag := c.doc.Fragments.Get(sel.Name.Name)
+ if frag == nil {
+ c.addErr(sel.Name.Loc, "KnownFragmentNames", "Unknown fragment %q.", sel.Name.Name)
+ return
+ }
+ fragTyp := c.schema.Types[frag.On.Name]
+ if !compatible(t, fragTyp) {
+ c.addErr(sel.Loc, "PossibleFragmentSpreads", "Fragment %q cannot be spread here as objects of type %q can never be of type %q.", frag.Name.Name, t, fragTyp)
+ }
+
+ default:
+ panic("unreachable")
+ }
+}
+
+func compatible(a, b types.Type) bool {
+ for _, pta := range possibleTypes(a) {
+ for _, ptb := range possibleTypes(b) {
+ if pta == ptb {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func possibleTypes(t types.Type) []*types.ObjectTypeDefinition {
+ switch t := t.(type) {
+ case *types.ObjectTypeDefinition:
+ return []*types.ObjectTypeDefinition{t}
+ case *types.InterfaceTypeDefinition:
+ return t.PossibleTypes
+ case *types.Union:
+ return t.UnionMemberTypes
+ default:
+ return nil
+ }
+}
+
+func markUsedFragments(c *context, sels []types.Selection, fragUsed map[*types.FragmentDefinition]struct{}) {
+ for _, sel := range sels {
+ switch sel := sel.(type) {
+ case *types.Field:
+ if sel.SelectionSet != nil {
+ markUsedFragments(c, sel.SelectionSet, fragUsed)
+ }
+
+ case *types.InlineFragment:
+ markUsedFragments(c, sel.Selections, fragUsed)
+
+ case *types.FragmentSpread:
+ frag := c.doc.Fragments.Get(sel.Name.Name)
+ if frag == nil {
+ return
+ }
+
+ if _, ok := fragUsed[frag]; ok {
+ continue
+ }
+
+ fragUsed[frag] = struct{}{}
+ markUsedFragments(c, frag.Selections, fragUsed)
+
+ default:
+ panic("unreachable")
+ }
+ }
+}
+
+func detectFragmentCycle(c *context, sels []types.Selection, fragVisited map[*types.FragmentDefinition]struct{}, spreadPath []*types.FragmentSpread, spreadPathIndex map[string]int) {
+ for _, sel := range sels {
+ detectFragmentCycleSel(c, sel, fragVisited, spreadPath, spreadPathIndex)
+ }
+}
+
+func detectFragmentCycleSel(c *context, sel types.Selection, fragVisited map[*types.FragmentDefinition]struct{}, spreadPath []*types.FragmentSpread, spreadPathIndex map[string]int) {
+ switch sel := sel.(type) {
+ case *types.Field:
+ if sel.SelectionSet != nil {
+ detectFragmentCycle(c, sel.SelectionSet, fragVisited, spreadPath, spreadPathIndex)
+ }
+
+ case *types.InlineFragment:
+ detectFragmentCycle(c, sel.Selections, fragVisited, spreadPath, spreadPathIndex)
+
+ case *types.FragmentSpread:
+ frag := c.doc.Fragments.Get(sel.Name.Name)
+ if frag == nil {
+ return
+ }
+
+ spreadPath = append(spreadPath, sel)
+ if i, ok := spreadPathIndex[frag.Name.Name]; ok {
+ cyclePath := spreadPath[i:]
+ via := ""
+ if len(cyclePath) > 1 {
+ names := make([]string, len(cyclePath)-1)
+ for i, frag := range cyclePath[:len(cyclePath)-1] {
+ names[i] = frag.Name.Name
+ }
+ via = " via " + strings.Join(names, ", ")
+ }
+
+ locs := make([]errors.Location, len(cyclePath))
+ for i, frag := range cyclePath {
+ locs[i] = frag.Loc
+ }
+ c.addErrMultiLoc(locs, "NoFragmentCycles", "Cannot spread fragment %q within itself%s.", frag.Name.Name, via)
+ return
+ }
+
+ if _, ok := fragVisited[frag]; ok {
+ return
+ }
+ fragVisited[frag] = struct{}{}
+
+ spreadPathIndex[frag.Name.Name] = len(spreadPath)
+ detectFragmentCycle(c, frag.Selections, fragVisited, spreadPath, spreadPathIndex)
+ delete(spreadPathIndex, frag.Name.Name)
+
+ default:
+ panic("unreachable")
+ }
+}
+
+func (c *context) validateOverlap(a, b types.Selection, reasons *[]string, locs *[]errors.Location) {
+ if a == b {
+ return
+ }
+
+ if _, ok := c.overlapValidated[selectionPair{a, b}]; ok {
+ return
+ }
+ c.overlapValidated[selectionPair{a, b}] = struct{}{}
+ c.overlapValidated[selectionPair{b, a}] = struct{}{}
+
+ switch a := a.(type) {
+ case *types.Field:
+ switch b := b.(type) {
+ case *types.Field:
+ if b.Alias.Loc.Before(a.Alias.Loc) {
+ a, b = b, a
+ }
+ if reasons2, locs2 := c.validateFieldOverlap(a, b); len(reasons2) != 0 {
+ locs2 = append(locs2, a.Alias.Loc, b.Alias.Loc)
+ if reasons == nil {
+ c.addErrMultiLoc(locs2, "OverlappingFieldsCanBeMerged", "Fields %q conflict because %s. Use different aliases on the fields to fetch both if this was intentional.", a.Alias.Name, strings.Join(reasons2, " and "))
+ return
+ }
+ for _, r := range reasons2 {
+ *reasons = append(*reasons, fmt.Sprintf("subfields %q conflict because %s", a.Alias.Name, r))
+ }
+ *locs = append(*locs, locs2...)
+ }
+
+ case *types.InlineFragment:
+ for _, sel := range b.Selections {
+ c.validateOverlap(a, sel, reasons, locs)
+ }
+
+ case *types.FragmentSpread:
+ if frag := c.doc.Fragments.Get(b.Name.Name); frag != nil {
+ for _, sel := range frag.Selections {
+ c.validateOverlap(a, sel, reasons, locs)
+ }
+ }
+
+ default:
+ panic("unreachable")
+ }
+
+ case *types.InlineFragment:
+ for _, sel := range a.Selections {
+ c.validateOverlap(sel, b, reasons, locs)
+ }
+
+ case *types.FragmentSpread:
+ if frag := c.doc.Fragments.Get(a.Name.Name); frag != nil {
+ for _, sel := range frag.Selections {
+ c.validateOverlap(sel, b, reasons, locs)
+ }
+ }
+
+ default:
+ panic("unreachable")
+ }
+}
+
+func (c *context) validateFieldOverlap(a, b *types.Field) ([]string, []errors.Location) {
+ if a.Alias.Name != b.Alias.Name {
+ return nil, nil
+ }
+
+ if asf := c.fieldMap[a].sf; asf != nil {
+ if bsf := c.fieldMap[b].sf; bsf != nil {
+ if !typesCompatible(asf.Type, bsf.Type) {
+ return []string{fmt.Sprintf("they return conflicting types %s and %s", asf.Type, bsf.Type)}, nil
+ }
+ }
+ }
+
+ at := c.fieldMap[a].parent
+ bt := c.fieldMap[b].parent
+ if at == nil || bt == nil || at == bt {
+ if a.Name.Name != b.Name.Name {
+ return []string{fmt.Sprintf("%s and %s are different fields", a.Name.Name, b.Name.Name)}, nil
+ }
+
+ if argumentsConflict(a.Arguments, b.Arguments) {
+ return []string{"they have differing arguments"}, nil
+ }
+ }
+
+ var reasons []string
+ var locs []errors.Location
+ for _, a2 := range a.SelectionSet {
+ for _, b2 := range b.SelectionSet {
+ c.validateOverlap(a2, b2, &reasons, &locs)
+ }
+ }
+ return reasons, locs
+}
+
+func argumentsConflict(a, b types.ArgumentList) bool {
+ if len(a) != len(b) {
+ return true
+ }
+ for _, argA := range a {
+ valB, ok := b.Get(argA.Name.Name)
+ if !ok || !reflect.DeepEqual(argA.Value.Deserialize(nil), valB.Deserialize(nil)) {
+ return true
+ }
+ }
+ return false
+}
+
+func fields(t types.Type) types.FieldsDefinition {
+ switch t := t.(type) {
+ case *types.ObjectTypeDefinition:
+ return t.Fields
+ case *types.InterfaceTypeDefinition:
+ return t.Fields
+ default:
+ return nil
+ }
+}
+
+func unwrapType(t types.Type) types.NamedType {
+ if t == nil {
+ return nil
+ }
+ for {
+ switch t2 := t.(type) {
+ case types.NamedType:
+ return t2
+ case *types.List:
+ t = t2.OfType
+ case *types.NonNull:
+ t = t2.OfType
+ default:
+ panic("unreachable")
+ }
+ }
+}
+
+func resolveType(c *context, t types.Type) types.Type {
+ t2, err := common.ResolveType(t, c.schema.Resolve)
+ if err != nil {
+ c.errs = append(c.errs, err)
+ }
+ return t2
+}
+
+func validateDirectives(c *opContext, loc string, directives types.DirectiveList) {
+ directiveNames := make(nameSet)
+ for _, d := range directives {
+ dirName := d.Name.Name
+ validateNameCustomMsg(c.context, directiveNames, d.Name, "UniqueDirectivesPerLocation", func() string {
+ return fmt.Sprintf("The directive %q can only be used once at this location.", dirName)
+ })
+
+ validateArgumentLiterals(c, d.Arguments)
+
+ dd, ok := c.schema.Directives[dirName]
+ if !ok {
+ c.addErr(d.Name.Loc, "KnownDirectives", "Unknown directive %q.", dirName)
+ continue
+ }
+
+ locOK := false
+ for _, allowedLoc := range dd.Locations {
+ if loc == allowedLoc {
+ locOK = true
+ break
+ }
+ }
+ if !locOK {
+ c.addErr(d.Name.Loc, "KnownDirectives", "Directive %q may not be used on %s.", dirName, loc)
+ }
+
+ validateArgumentTypes(c, d.Arguments, dd.Arguments, d.Name.Loc,
+ func() string { return fmt.Sprintf("directive %q", "@"+dirName) },
+ func() string { return fmt.Sprintf("Directive %q", "@"+dirName) },
+ )
+ }
+}
+
+func validateName(c *context, set nameSet, name types.Ident, rule string, kind string) {
+ validateNameCustomMsg(c, set, name, rule, func() string {
+ return fmt.Sprintf("There can be only one %s named %q.", kind, name.Name)
+ })
+}
+
+func validateNameCustomMsg(c *context, set nameSet, name types.Ident, rule string, msg func() string) {
+ if loc, ok := set[name.Name]; ok {
+ c.addErrMultiLoc([]errors.Location{loc, name.Loc}, rule, msg())
+ return
+ }
+ set[name.Name] = name.Loc
+}
+
+func validateArgumentTypes(c *opContext, args types.ArgumentList, argDecls types.ArgumentsDefinition, loc errors.Location, owner1, owner2 func() string) {
+ for _, selArg := range args {
+ arg := argDecls.Get(selArg.Name.Name)
+ if arg == nil {
+ c.addErr(selArg.Name.Loc, "KnownArgumentNames", "Unknown argument %q on %s.", selArg.Name.Name, owner1())
+ continue
+ }
+ value := selArg.Value
+ if ok, reason := validateValueType(c, value, arg.Type); !ok {
+ c.addErr(value.Location(), "ArgumentsOfCorrectType", "Argument %q has invalid value %s.\n%s", arg.Name.Name, value, reason)
+ }
+ }
+ for _, decl := range argDecls {
+ if _, ok := decl.Type.(*types.NonNull); ok {
+ if _, ok := args.Get(decl.Name.Name); !ok {
+ c.addErr(loc, "ProvidedNonNullArguments", "%s argument %q of type %q is required but not provided.", owner2(), decl.Name.Name, decl.Type)
+ }
+ }
+ }
+}
+
+func validateArgumentLiterals(c *opContext, args types.ArgumentList) {
+ argNames := make(nameSet)
+ for _, arg := range args {
+ validateName(c.context, argNames, arg.Name, "UniqueArgumentNames", "argument")
+ validateLiteral(c, arg.Value)
+ }
+}
+
+func validateLiteral(c *opContext, l types.Value) {
+ switch l := l.(type) {
+ case *types.ObjectValue:
+ fieldNames := make(nameSet)
+ for _, f := range l.Fields {
+ validateName(c.context, fieldNames, f.Name, "UniqueInputFieldNames", "input field")
+ validateLiteral(c, f.Value)
+ }
+ case *types.ListValue:
+ for _, entry := range l.Values {
+ validateLiteral(c, entry)
+ }
+ case *types.Variable:
+ for _, op := range c.ops {
+ v := op.Vars.Get(l.Name)
+ if v == nil {
+ byOp := ""
+ if op.Name.Name != "" {
+ byOp = fmt.Sprintf(" by operation %q", op.Name.Name)
+ }
+ c.opErrs[op] = append(c.opErrs[op], &errors.QueryError{
+ Message: fmt.Sprintf("Variable %q is not defined%s.", "$"+l.Name, byOp),
+ Locations: []errors.Location{l.Loc, op.Loc},
+ Rule: "NoUndefinedVariables",
+ })
+ continue
+ }
+ validateValueType(c, l, resolveType(c.context, v.Type))
+ c.usedVars[op][v] = struct{}{}
+ }
+ }
+}
+
+func validateValueType(c *opContext, v types.Value, t types.Type) (bool, string) {
+ if v, ok := v.(*types.Variable); ok {
+ for _, op := range c.ops {
+ if v2 := op.Vars.Get(v.Name); v2 != nil {
+ t2, err := common.ResolveType(v2.Type, c.schema.Resolve)
+ if _, ok := t2.(*types.NonNull); !ok && v2.Default != nil {
+ t2 = &types.NonNull{OfType: t2}
+ }
+ if err == nil && !typeCanBeUsedAs(t2, t) {
+ c.addErrMultiLoc([]errors.Location{v2.Loc, v.Loc}, "VariablesInAllowedPosition", "Variable %q of type %q used in position expecting type %q.", "$"+v.Name, t2, t)
+ }
+ }
+ }
+ return true, ""
+ }
+
+ if nn, ok := t.(*types.NonNull); ok {
+ if isNull(v) {
+ return false, fmt.Sprintf("Expected %q, found null.", t)
+ }
+ t = nn.OfType
+ }
+ if isNull(v) {
+ return true, ""
+ }
+
+ switch t := t.(type) {
+ case *types.ScalarTypeDefinition, *types.EnumTypeDefinition:
+ if lit, ok := v.(*types.PrimitiveValue); ok {
+ if validateBasicLit(lit, t) {
+ return true, ""
+ }
+ return false, fmt.Sprintf("Expected type %q, found %s.", t, v)
+ }
+ return true, ""
+
+ case *types.List:
+ list, ok := v.(*types.ListValue)
+ if !ok {
+ return validateValueType(c, v, t.OfType) // single value instead of list
+ }
+ for i, entry := range list.Values {
+ if ok, reason := validateValueType(c, entry, t.OfType); !ok {
+ return false, fmt.Sprintf("In element #%d: %s", i, reason)
+ }
+ }
+ return true, ""
+
+ case *types.InputObject:
+ v, ok := v.(*types.ObjectValue)
+ if !ok {
+ return false, fmt.Sprintf("Expected %q, found not an object.", t)
+ }
+ for _, f := range v.Fields {
+ name := f.Name.Name
+ iv := t.Values.Get(name)
+ if iv == nil {
+ return false, fmt.Sprintf("In field %q: Unknown field.", name)
+ }
+ if ok, reason := validateValueType(c, f.Value, iv.Type); !ok {
+ return false, fmt.Sprintf("In field %q: %s", name, reason)
+ }
+ }
+ for _, iv := range t.Values {
+ found := false
+ for _, f := range v.Fields {
+ if f.Name.Name == iv.Name.Name {
+ found = true
+ break
+ }
+ }
+ if !found {
+ if _, ok := iv.Type.(*types.NonNull); ok && iv.Default == nil {
+ return false, fmt.Sprintf("In field %q: Expected %q, found null.", iv.Name.Name, iv.Type)
+ }
+ }
+ }
+ return true, ""
+ }
+
+ return false, fmt.Sprintf("Expected type %q, found %s.", t, v)
+}
+
+func validateBasicLit(v *types.PrimitiveValue, t types.Type) bool {
+ switch t := t.(type) {
+ case *types.ScalarTypeDefinition:
+ switch t.Name {
+ case "Int":
+ if v.Type != scanner.Int {
+ return false
+ }
+ f, err := strconv.ParseFloat(v.Text, 64)
+ if err != nil {
+ panic(err)
+ }
+ return f >= math.MinInt32 && f <= math.MaxInt32
+ case "Float":
+ return v.Type == scanner.Int || v.Type == scanner.Float
+ case "String":
+ return v.Type == scanner.String
+ case "Boolean":
+ return v.Type == scanner.Ident && (v.Text == "true" || v.Text == "false")
+ case "ID":
+ return v.Type == scanner.Int || v.Type == scanner.String
+ default:
+			// TODO: Type-check against the expected type by unmarshalling.
+ return true
+ }
+
+ case *types.EnumTypeDefinition:
+ if v.Type != scanner.Ident {
+ return false
+ }
+ for _, option := range t.EnumValuesDefinition {
+ if option.EnumValue == v.Text {
+ return true
+ }
+ }
+ return false
+ }
+
+ return false
+}
+
+func canBeFragment(t types.Type) bool {
+ switch t.(type) {
+ case *types.ObjectTypeDefinition, *types.InterfaceTypeDefinition, *types.Union:
+ return true
+ default:
+ return false
+ }
+}
+
+func canBeInput(t types.Type) bool {
+ switch t := t.(type) {
+ case *types.InputObject, *types.ScalarTypeDefinition, *types.EnumTypeDefinition:
+ return true
+ case *types.List:
+ return canBeInput(t.OfType)
+ case *types.NonNull:
+ return canBeInput(t.OfType)
+ default:
+ return false
+ }
+}
+
+func hasSubfields(t types.Type) bool {
+ switch t := t.(type) {
+ case *types.ObjectTypeDefinition, *types.InterfaceTypeDefinition, *types.Union:
+ return true
+ case *types.List:
+ return hasSubfields(t.OfType)
+ case *types.NonNull:
+ return hasSubfields(t.OfType)
+ default:
+ return false
+ }
+}
+
+func isLeaf(t types.Type) bool {
+ switch t.(type) {
+ case *types.ScalarTypeDefinition, *types.EnumTypeDefinition:
+ return true
+ default:
+ return false
+ }
+}
+
+func isNull(lit interface{}) bool {
+ _, ok := lit.(*types.NullValue)
+ return ok
+}
+
+func typesCompatible(a, b types.Type) bool {
+ al, aIsList := a.(*types.List)
+ bl, bIsList := b.(*types.List)
+ if aIsList || bIsList {
+ return aIsList && bIsList && typesCompatible(al.OfType, bl.OfType)
+ }
+
+ ann, aIsNN := a.(*types.NonNull)
+ bnn, bIsNN := b.(*types.NonNull)
+ if aIsNN || bIsNN {
+ return aIsNN && bIsNN && typesCompatible(ann.OfType, bnn.OfType)
+ }
+
+ if isLeaf(a) || isLeaf(b) {
+ return a == b
+ }
+
+ return true
+}
+
+func typeCanBeUsedAs(t, as types.Type) bool {
+ nnT, okT := t.(*types.NonNull)
+ if okT {
+ t = nnT.OfType
+ }
+
+ nnAs, okAs := as.(*types.NonNull)
+ if okAs {
+ as = nnAs.OfType
+ if !okT {
+ return false // nullable can not be used as non-null
+ }
+ }
+
+ if t == as {
+ return true
+ }
+
+ if lT, ok := t.(*types.List); ok {
+ if lAs, ok := as.(*types.List); ok {
+ return typeCanBeUsedAs(lT.OfType, lAs.OfType)
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/introspection.go b/vendor/github.com/graph-gophers/graphql-go/introspection.go
new file mode 100644
index 00000000..6877bcaf
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/introspection.go
@@ -0,0 +1,118 @@
+package graphql
+
+import (
+ "context"
+ "encoding/json"
+
+ "github.com/graph-gophers/graphql-go/internal/exec/resolvable"
+ "github.com/graph-gophers/graphql-go/introspection"
+)
+
+// Inspect allows inspection of the given schema.
+func (s *Schema) Inspect() *introspection.Schema {
+ return introspection.WrapSchema(s.schema)
+}
+
+// ToJSON encodes the schema in a JSON format used by tools like Relay.
+func (s *Schema) ToJSON() ([]byte, error) {
+ result := s.exec(context.Background(), introspectionQuery, "", nil, &resolvable.Schema{
+ Meta: s.res.Meta,
+ Query: &resolvable.Object{},
+ Schema: *s.schema,
+ })
+ if len(result.Errors) != 0 {
+ panic(result.Errors[0])
+ }
+ return json.MarshalIndent(result.Data, "", "\t")
+}
+
+var introspectionQuery = `
+ query {
+ __schema {
+ queryType { name }
+ mutationType { name }
+ subscriptionType { name }
+ types {
+ ...FullType
+ }
+ directives {
+ name
+ description
+ locations
+ args {
+ ...InputValue
+ }
+ }
+ }
+ }
+ fragment FullType on __Type {
+ kind
+ name
+ description
+ fields(includeDeprecated: true) {
+ name
+ description
+ args {
+ ...InputValue
+ }
+ type {
+ ...TypeRef
+ }
+ isDeprecated
+ deprecationReason
+ }
+ inputFields {
+ ...InputValue
+ }
+ interfaces {
+ ...TypeRef
+ }
+ enumValues(includeDeprecated: true) {
+ name
+ description
+ isDeprecated
+ deprecationReason
+ }
+ possibleTypes {
+ ...TypeRef
+ }
+ }
+ fragment InputValue on __InputValue {
+ name
+ description
+ type { ...TypeRef }
+ defaultValue
+ }
+ fragment TypeRef on __Type {
+ kind
+ name
+ ofType {
+ kind
+ name
+ ofType {
+ kind
+ name
+ ofType {
+ kind
+ name
+ ofType {
+ kind
+ name
+ ofType {
+ kind
+ name
+ ofType {
+ kind
+ name
+ ofType {
+ kind
+ name
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+`
diff --git a/vendor/github.com/graph-gophers/graphql-go/introspection/introspection.go b/vendor/github.com/graph-gophers/graphql-go/introspection/introspection.go
new file mode 100644
index 00000000..a0a2fa9b
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/introspection/introspection.go
@@ -0,0 +1,312 @@
+package introspection
+
+import (
+ "sort"
+
+ "github.com/graph-gophers/graphql-go/types"
+)
+
+type Schema struct {
+ schema *types.Schema
+}
+
+// WrapSchema is only used internally.
+func WrapSchema(schema *types.Schema) *Schema {
+ return &Schema{schema}
+}
+
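+// Types returns all named types of the wrapped schema, sorted by name.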
+func (r *Schema) Types() []*Type {
+ var names []string
+ for name := range r.schema.Types {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+
+ l := make([]*Type, len(names))
+ for i, name := range names {
+ l[i] = &Type{r.schema.Types[name]}
+ }
+ return l
+}
+
+func (r *Schema) Directives() []*Directive {
+ var names []string
+ for name := range r.schema.Directives {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+
+ l := make([]*Directive, len(names))
+ for i, name := range names {
+ l[i] = &Directive{r.schema.Directives[name]}
+ }
+ return l
+}
+
+func (r *Schema) QueryType() *Type {
+ t, ok := r.schema.EntryPoints["query"]
+ if !ok {
+ return nil
+ }
+ return &Type{t}
+}
+
+func (r *Schema) MutationType() *Type {
+ t, ok := r.schema.EntryPoints["mutation"]
+ if !ok {
+ return nil
+ }
+ return &Type{t}
+}
+
+func (r *Schema) SubscriptionType() *Type {
+ t, ok := r.schema.EntryPoints["subscription"]
+ if !ok {
+ return nil
+ }
+ return &Type{t}
+}
+
+type Type struct {
+ typ types.Type
+}
+
+// WrapType is only used internally.
+func WrapType(typ types.Type) *Type {
+ return &Type{typ}
+}
+
+func (r *Type) Kind() string {
+ return r.typ.Kind()
+}
+
+func (r *Type) Name() *string {
+ if named, ok := r.typ.(types.NamedType); ok {
+ name := named.TypeName()
+ return &name
+ }
+ return nil
+}
+
+func (r *Type) Description() *string {
+ if named, ok := r.typ.(types.NamedType); ok {
+ desc := named.Description()
+ if desc == "" {
+ return nil
+ }
+ return &desc
+ }
+ return nil
+}
+
+func (r *Type) Fields(args *struct{ IncludeDeprecated bool }) *[]*Field {
+ var fields types.FieldsDefinition
+ switch t := r.typ.(type) {
+ case *types.ObjectTypeDefinition:
+ fields = t.Fields
+ case *types.InterfaceTypeDefinition:
+ fields = t.Fields
+ default:
+ return nil
+ }
+
+ var l []*Field
+ for _, f := range fields {
+ if d := f.Directives.Get("deprecated"); d == nil || args.IncludeDeprecated {
+ l = append(l, &Field{field: f})
+ }
+ }
+ return &l
+}
+
+func (r *Type) Interfaces() *[]*Type {
+ t, ok := r.typ.(*types.ObjectTypeDefinition)
+ if !ok {
+ return nil
+ }
+
+ l := make([]*Type, len(t.Interfaces))
+ for i, intf := range t.Interfaces {
+ l[i] = &Type{intf}
+ }
+ return &l
+}
+
+func (r *Type) PossibleTypes() *[]*Type {
+ var possibleTypes []*types.ObjectTypeDefinition
+ switch t := r.typ.(type) {
+ case *types.InterfaceTypeDefinition:
+ possibleTypes = t.PossibleTypes
+ case *types.Union:
+ possibleTypes = t.UnionMemberTypes
+ default:
+ return nil
+ }
+
+ l := make([]*Type, len(possibleTypes))
+ for i, intf := range possibleTypes {
+ l[i] = &Type{intf}
+ }
+ return &l
+}
+
+func (r *Type) EnumValues(args *struct{ IncludeDeprecated bool }) *[]*EnumValue {
+ t, ok := r.typ.(*types.EnumTypeDefinition)
+ if !ok {
+ return nil
+ }
+
+ var l []*EnumValue
+ for _, v := range t.EnumValuesDefinition {
+ if d := v.Directives.Get("deprecated"); d == nil || args.IncludeDeprecated {
+ l = append(l, &EnumValue{v})
+ }
+ }
+ return &l
+}
+
+func (r *Type) InputFields() *[]*InputValue {
+ t, ok := r.typ.(*types.InputObject)
+ if !ok {
+ return nil
+ }
+
+ l := make([]*InputValue, len(t.Values))
+ for i, v := range t.Values {
+ l[i] = &InputValue{v}
+ }
+ return &l
+}
+
+func (r *Type) OfType() *Type {
+ switch t := r.typ.(type) {
+ case *types.List:
+ return &Type{t.OfType}
+ case *types.NonNull:
+ return &Type{t.OfType}
+ default:
+ return nil
+ }
+}
+
+type Field struct {
+ field *types.FieldDefinition
+}
+
+func (r *Field) Name() string {
+ return r.field.Name
+}
+
+func (r *Field) Description() *string {
+ if r.field.Desc == "" {
+ return nil
+ }
+ return &r.field.Desc
+}
+
+func (r *Field) Args() []*InputValue {
+ l := make([]*InputValue, len(r.field.Arguments))
+ for i, v := range r.field.Arguments {
+ l[i] = &InputValue{v}
+ }
+ return l
+}
+
+func (r *Field) Type() *Type {
+ return &Type{r.field.Type}
+}
+
+func (r *Field) IsDeprecated() bool {
+ return r.field.Directives.Get("deprecated") != nil
+}
+
+func (r *Field) DeprecationReason() *string {
+ d := r.field.Directives.Get("deprecated")
+ if d == nil {
+ return nil
+ }
+ reason := d.Arguments.MustGet("reason").Deserialize(nil).(string)
+ return &reason
+}
+
+type InputValue struct {
+ value *types.InputValueDefinition
+}
+
+func (r *InputValue) Name() string {
+ return r.value.Name.Name
+}
+
+func (r *InputValue) Description() *string {
+ if r.value.Desc == "" {
+ return nil
+ }
+ return &r.value.Desc
+}
+
+func (r *InputValue) Type() *Type {
+ return &Type{r.value.Type}
+}
+
+func (r *InputValue) DefaultValue() *string {
+ if r.value.Default == nil {
+ return nil
+ }
+ s := r.value.Default.String()
+ return &s
+}
+
+type EnumValue struct {
+ value *types.EnumValueDefinition
+}
+
+func (r *EnumValue) Name() string {
+ return r.value.EnumValue
+}
+
+func (r *EnumValue) Description() *string {
+ if r.value.Desc == "" {
+ return nil
+ }
+ return &r.value.Desc
+}
+
+func (r *EnumValue) IsDeprecated() bool {
+ return r.value.Directives.Get("deprecated") != nil
+}
+
+func (r *EnumValue) DeprecationReason() *string {
+ d := r.value.Directives.Get("deprecated")
+ if d == nil {
+ return nil
+ }
+ reason := d.Arguments.MustGet("reason").Deserialize(nil).(string)
+ return &reason
+}
+
+type Directive struct {
+ directive *types.DirectiveDefinition
+}
+
+func (r *Directive) Name() string {
+ return r.directive.Name
+}
+
+func (r *Directive) Description() *string {
+ if r.directive.Desc == "" {
+ return nil
+ }
+ return &r.directive.Desc
+}
+
+func (r *Directive) Locations() []string {
+ return r.directive.Locations
+}
+
+func (r *Directive) Args() []*InputValue {
+ l := make([]*InputValue, len(r.directive.Arguments))
+ for i, v := range r.directive.Arguments {
+ l[i] = &InputValue{v}
+ }
+ return l
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/log/log.go b/vendor/github.com/graph-gophers/graphql-go/log/log.go
new file mode 100644
index 00000000..bdada874
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/log/log.go
@@ -0,0 +1,23 @@
+package log
+
+import (
+ "context"
+ "log"
+ "runtime"
+)
+
+// Logger is the interface used to log panics that occur during query execution. It is settable via graphql.ParseSchema.
+type Logger interface {
+ LogPanic(ctx context.Context, value interface{})
+}
+
+// DefaultLogger is the default logger used to log panics that occur during query execution
+type DefaultLogger struct{}
+
+// LogPanic is used to log recovered panic values that occur during query execution
+func (l *DefaultLogger) LogPanic(ctx context.Context, value interface{}) {
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ log.Printf("graphql: panic occurred: %v\n%s\ncontext: %v", value, buf, ctx)
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/nullable_types.go b/vendor/github.com/graph-gophers/graphql-go/nullable_types.go
new file mode 100644
index 00000000..fa5bbfd6
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/nullable_types.go
@@ -0,0 +1,166 @@
+package graphql
+
+import (
+ "fmt"
+ "math"
+)
+
+// NullString is a string that can be null. Use it in input structs to
+// differentiate a value explicitly set to null from an omitted value.
+// When the value is defined (either null or a value) Set is true.
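+//
+// A minimal illustration (hypothetical resolver argument struct):
+//
+//	type UpdateNameArgs struct {
+//		Name NullString
+//	}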
+type NullString struct {
+ Value *string
+ Set bool
+}
+
+func (NullString) ImplementsGraphQLType(name string) bool {
+ return name == "String"
+}
+
+func (s *NullString) UnmarshalGraphQL(input interface{}) error {
+ s.Set = true
+
+ if input == nil {
+ return nil
+ }
+
+ switch v := input.(type) {
+ case string:
+ s.Value = &v
+ return nil
+ default:
+ return fmt.Errorf("wrong type for String: %T", v)
+ }
+}
+
+func (s *NullString) Nullable() {}
+
+// NullBool is a boolean that can be null. Use it in input structs to
+// differentiate a value explicitly set to null from an omitted value.
+// When the value is defined (either null or a value) Set is true.
+type NullBool struct {
+ Value *bool
+ Set bool
+}
+
+func (NullBool) ImplementsGraphQLType(name string) bool {
+ return name == "Boolean"
+}
+
+func (s *NullBool) UnmarshalGraphQL(input interface{}) error {
+ s.Set = true
+
+ if input == nil {
+ return nil
+ }
+
+ switch v := input.(type) {
+ case bool:
+ s.Value = &v
+ return nil
+ default:
+ return fmt.Errorf("wrong type for Boolean: %T", v)
+ }
+}
+
+func (s *NullBool) Nullable() {}
+
+// NullInt is an Int that can be null. Use it in input structs to
+// differentiate a value explicitly set to null from an omitted value.
+// When the value is defined (either null or a value) Set is true.
+type NullInt struct {
+ Value *int32
+ Set bool
+}
+
+func (NullInt) ImplementsGraphQLType(name string) bool {
+ return name == "Int"
+}
+
+func (s *NullInt) UnmarshalGraphQL(input interface{}) error {
+ s.Set = true
+
+ if input == nil {
+ return nil
+ }
+
+ switch v := input.(type) {
+ case int32:
+ s.Value = &v
+ return nil
+ case float64:
+ coerced := int32(v)
+ if v < math.MinInt32 || v > math.MaxInt32 || float64(coerced) != v {
+ return fmt.Errorf("not a 32-bit integer")
+ }
+ s.Value = &coerced
+ return nil
+ default:
+ return fmt.Errorf("wrong type for Int: %T", v)
+ }
+}
+
+func (s *NullInt) Nullable() {}
+
+// NullFloat is a Float that can be null. Use it in input structs to
+// differentiate a value explicitly set to null from an omitted value.
+// When the value is defined (either null or a value) Set is true.
+type NullFloat struct {
+ Value *float64
+ Set bool
+}
+
+func (NullFloat) ImplementsGraphQLType(name string) bool {
+ return name == "Float"
+}
+
+func (s *NullFloat) UnmarshalGraphQL(input interface{}) error {
+ s.Set = true
+
+ if input == nil {
+ return nil
+ }
+
+ switch v := input.(type) {
+ case float64:
+ s.Value = &v
+ return nil
+ case int32:
+ coerced := float64(v)
+ s.Value = &coerced
+ return nil
+ case int:
+ coerced := float64(v)
+ s.Value = &coerced
+ return nil
+ default:
+ return fmt.Errorf("wrong type for Float: %T", v)
+ }
+}
+
+func (s *NullFloat) Nullable() {}
+
+// NullTime is a Time that can be null. Use it in input structs to
+// differentiate a value explicitly set to null from an omitted value.
+// When the value is defined (either null or a value) Set is true.
+type NullTime struct {
+ Value *Time
+ Set bool
+}
+
+func (NullTime) ImplementsGraphQLType(name string) bool {
+ return name == "Time"
+}
+
+func (s *NullTime) UnmarshalGraphQL(input interface{}) error {
+ s.Set = true
+
+ if input == nil {
+ return nil
+ }
+
+ s.Value = new(Time)
+ return s.Value.UnmarshalGraphQL(input)
+}
+
+func (s *NullTime) Nullable() {}
diff --git a/vendor/github.com/graph-gophers/graphql-go/subscriptions.go b/vendor/github.com/graph-gophers/graphql-go/subscriptions.go
new file mode 100644
index 00000000..34064dc7
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/subscriptions.go
@@ -0,0 +1,96 @@
+package graphql
+
+import (
+ "context"
+ "errors"
+
+ qerrors "github.com/graph-gophers/graphql-go/errors"
+ "github.com/graph-gophers/graphql-go/internal/common"
+ "github.com/graph-gophers/graphql-go/internal/exec"
+ "github.com/graph-gophers/graphql-go/internal/exec/resolvable"
+ "github.com/graph-gophers/graphql-go/internal/exec/selected"
+ "github.com/graph-gophers/graphql-go/internal/query"
+ "github.com/graph-gophers/graphql-go/internal/validation"
+ "github.com/graph-gophers/graphql-go/introspection"
+)
+
+// Subscribe returns a response channel for the given subscription with the schema's
+// resolver. It returns an error if the schema was created without a resolver.
+// If the context gets cancelled, the response channel will be closed and no
+// further resolvers will be called. The context error will be returned as soon
+// as possible (not immediately).
+func (s *Schema) Subscribe(ctx context.Context, queryString string, operationName string, variables map[string]interface{}) (<-chan interface{}, error) {
+ if !s.res.Resolver.IsValid() {
+ return nil, errors.New("schema created without resolver, can not subscribe")
+ }
+ if _, ok := s.schema.EntryPoints["subscription"]; !ok {
+ return nil, errors.New("no subscriptions are offered by the schema")
+ }
+ return s.subscribe(ctx, queryString, operationName, variables, s.res), nil
+}
+
+func (s *Schema) subscribe(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, res *resolvable.Schema) <-chan interface{} {
+ doc, qErr := query.Parse(queryString)
+ if qErr != nil {
+ return sendAndReturnClosed(&Response{Errors: []*qerrors.QueryError{qErr}})
+ }
+
+ validationFinish := s.validationTracer.TraceValidation(ctx)
+ errs := validation.Validate(s.schema, doc, variables, s.maxDepth)
+ validationFinish(errs)
+ if len(errs) != 0 {
+ return sendAndReturnClosed(&Response{Errors: errs})
+ }
+
+ op, err := getOperation(doc, operationName)
+ if err != nil {
+ return sendAndReturnClosed(&Response{Errors: []*qerrors.QueryError{qerrors.Errorf("%s", err)}})
+ }
+
+ r := &exec.Request{
+ Request: selected.Request{
+ Doc: doc,
+ Vars: variables,
+ Schema: s.schema,
+ },
+ Limiter: make(chan struct{}, s.maxParallelism),
+ Tracer: s.tracer,
+ Logger: s.logger,
+ PanicHandler: s.panicHandler,
+ SubscribeResolverTimeout: s.subscribeResolverTimeout,
+ }
+ varTypes := make(map[string]*introspection.Type)
+ for _, v := range op.Vars {
+ t, err := common.ResolveType(v.Type, s.schema.Resolve)
+ if err != nil {
+ return sendAndReturnClosed(&Response{Errors: []*qerrors.QueryError{err}})
+ }
+ varTypes[v.Name.Name] = introspection.WrapType(t)
+ }
+
+ if op.Type == query.Query || op.Type == query.Mutation {
+ data, errs := r.Execute(ctx, res, op)
+ return sendAndReturnClosed(&Response{Data: data, Errors: errs})
+ }
+
+ responses := r.Subscribe(ctx, res, op)
+ c := make(chan interface{})
+ go func() {
+ for resp := range responses {
+ c <- &Response{
+ Data: resp.Data,
+ Errors: resp.Errors,
+ }
+ }
+ close(c)
+ }()
+
+ return c
+}
+
+func sendAndReturnClosed(resp *Response) chan interface{} {
+ c := make(chan interface{}, 1)
+ c <- resp
+ close(c)
+ return c
+}
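For context, a hedged sketch of consuming the channel that Subscribe returns. The schema, resolver, and channel-returning method follow the library's documented subscription convention and are placeholders, not part of this diff:

```go
package main

import (
	"context"
	"fmt"

	graphql "github.com/graph-gophers/graphql-go"
)

const schemaString = `
	schema {
		query: Query
		subscription: Subscription
	}

	type Query {
		hello: String!
	}

	type Subscription {
		ticks: Int!
	}
`

type resolver struct{}

func (*resolver) Hello() string { return "world" }

// Subscription resolvers return a channel; the library forwards each sent
// value as one response on the channel returned by Subscribe.
func (*resolver) Ticks(ctx context.Context) <-chan int32 {
	out := make(chan int32)
	go func() {
		defer close(out)
		for i := int32(1); i <= 3; i++ {
			select {
			case out <- i:
			case <-ctx.Done():
				return
			}
		}
	}()
	return out
}

func main() {
	schema := graphql.MustParseSchema(schemaString, &resolver{})

	responses, err := schema.Subscribe(context.Background(), `subscription { ticks }`, "", nil)
	if err != nil {
		panic(err)
	}

	// Each item is a *graphql.Response; the channel closes when the source
	// channel closes or the context is cancelled.
	for resp := range responses {
		r := resp.(*graphql.Response)
		fmt.Printf("data=%s errors=%v\n", r.Data, r.Errors)
	}
}
```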
diff --git a/vendor/github.com/graph-gophers/graphql-go/time.go b/vendor/github.com/graph-gophers/graphql-go/time.go
new file mode 100644
index 00000000..974287e7
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/time.go
@@ -0,0 +1,64 @@
+package graphql
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
+// Time is a custom GraphQL type to represent an instant in time. It has to be added to a schema
+// via "scalar Time" since it is not a predeclared GraphQL type like "ID".
+type Time struct {
+ time.Time
+}
+
+// ImplementsGraphQLType maps this custom Go type
+// to the graphql scalar type in the schema.
+func (Time) ImplementsGraphQLType(name string) bool {
+ return name == "Time"
+}
+
+// UnmarshalGraphQL is a custom unmarshaler for Time
+//
+// This function will be called whenever you use the
+// time scalar as an input
+func (t *Time) UnmarshalGraphQL(input interface{}) error {
+ switch input := input.(type) {
+ case time.Time:
+ t.Time = input
+ return nil
+ case string:
+ var err error
+ t.Time, err = time.Parse(time.RFC3339, input)
+ return err
+ case []byte:
+ var err error
+ t.Time, err = time.Parse(time.RFC3339, string(input))
+ return err
+ case int32:
+ t.Time = time.Unix(int64(input), 0)
+ return nil
+ case int64:
+ if input >= 1e10 {
+ sec := input / 1e9
+ nsec := input - (sec * 1e9)
+ t.Time = time.Unix(sec, nsec)
+ } else {
+ t.Time = time.Unix(input, 0)
+ }
+ return nil
+ case float64:
+ t.Time = time.Unix(int64(input), 0)
+ return nil
+ default:
+ return fmt.Errorf("wrong type for Time: %T", input)
+ }
+}
+
+// MarshalJSON is a custom marshaler for Time
+//
+// This function will be called whenever you
+// query for fields that use the Time type
+func (t Time) MarshalJSON() ([]byte, error) {
+ return json.Marshal(t.Time)
+}
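A small illustrative sketch of the `scalar Time` declaration the comment above refers to; the schema and resolver names are placeholders:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	graphql "github.com/graph-gophers/graphql-go"
)

const schemaString = `
	scalar Time

	schema { query: Query }

	type Query {
		now: Time!
	}
`

type resolver struct{}

// Returning graphql.Time lets the field serialize through Time.MarshalJSON.
func (*resolver) Now() graphql.Time {
	return graphql.Time{Time: time.Now().UTC()}
}

func main() {
	schema := graphql.MustParseSchema(schemaString, &resolver{})
	resp := schema.Exec(context.Background(), `{ now }`, "", nil)

	out, _ := json.Marshal(resp)
	fmt.Println(string(out)) // {"data":{"now":"..."}}
}
```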
diff --git a/vendor/github.com/graph-gophers/graphql-go/trace/trace.go b/vendor/github.com/graph-gophers/graphql-go/trace/trace.go
new file mode 100644
index 00000000..8d5d8a71
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/trace/trace.go
@@ -0,0 +1,96 @@
+package trace
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/graph-gophers/graphql-go/errors"
+ "github.com/graph-gophers/graphql-go/introspection"
+ opentracing "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+ "github.com/opentracing/opentracing-go/log"
+)
+
+type TraceQueryFinishFunc func([]*errors.QueryError)
+type TraceFieldFinishFunc func(*errors.QueryError)
+
+type Tracer interface {
+ TraceQuery(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, varTypes map[string]*introspection.Type) (context.Context, TraceQueryFinishFunc)
+ TraceField(ctx context.Context, label, typeName, fieldName string, trivial bool, args map[string]interface{}) (context.Context, TraceFieldFinishFunc)
+}
+
+type OpenTracingTracer struct{}
+
+func (OpenTracingTracer) TraceQuery(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, varTypes map[string]*introspection.Type) (context.Context, TraceQueryFinishFunc) {
+ span, spanCtx := opentracing.StartSpanFromContext(ctx, "GraphQL request")
+ span.SetTag("graphql.query", queryString)
+
+ if operationName != "" {
+ span.SetTag("graphql.operationName", operationName)
+ }
+
+ if len(variables) != 0 {
+ span.LogFields(log.Object("graphql.variables", variables))
+ }
+
+ return spanCtx, func(errs []*errors.QueryError) {
+ if len(errs) > 0 {
+ msg := errs[0].Error()
+ if len(errs) > 1 {
+ msg += fmt.Sprintf(" (and %d more errors)", len(errs)-1)
+ }
+ ext.Error.Set(span, true)
+ span.SetTag("graphql.error", msg)
+ }
+ span.Finish()
+ }
+}
+
+func (OpenTracingTracer) TraceField(ctx context.Context, label, typeName, fieldName string, trivial bool, args map[string]interface{}) (context.Context, TraceFieldFinishFunc) {
+ if trivial {
+ return ctx, noop
+ }
+
+ span, spanCtx := opentracing.StartSpanFromContext(ctx, label)
+ span.SetTag("graphql.type", typeName)
+ span.SetTag("graphql.field", fieldName)
+ for name, value := range args {
+ span.SetTag("graphql.args."+name, value)
+ }
+
+ return spanCtx, func(err *errors.QueryError) {
+ if err != nil {
+ ext.Error.Set(span, true)
+ span.SetTag("graphql.error", err.Error())
+ }
+ span.Finish()
+ }
+}
+
+func (OpenTracingTracer) TraceValidation(ctx context.Context) TraceValidationFinishFunc {
+ span, _ := opentracing.StartSpanFromContext(ctx, "Validate Query")
+
+ return func(errs []*errors.QueryError) {
+ if len(errs) > 0 {
+ msg := errs[0].Error()
+ if len(errs) > 1 {
+ msg += fmt.Sprintf(" (and %d more errors)", len(errs)-1)
+ }
+ ext.Error.Set(span, true)
+ span.SetTag("graphql.error", msg)
+ }
+ span.Finish()
+ }
+}
+
+func noop(*errors.QueryError) {}
+
+type NoopTracer struct{}
+
+func (NoopTracer) TraceQuery(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, varTypes map[string]*introspection.Type) (context.Context, TraceQueryFinishFunc) {
+ return ctx, func(errs []*errors.QueryError) {}
+}
+
+func (NoopTracer) TraceField(ctx context.Context, label, typeName, fieldName string, trivial bool, args map[string]interface{}) (context.Context, TraceFieldFinishFunc) {
+ return ctx, func(err *errors.QueryError) {}
+}
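For context, a sketch of wiring one of these tracers into a schema, assuming the package's `graphql.Tracer` schema option as documented upstream; the schema and resolver are illustrative:

```go
package main

import (
	"context"
	"fmt"

	graphql "github.com/graph-gophers/graphql-go"
	"github.com/graph-gophers/graphql-go/trace"
)

const schemaString = `
	schema { query: Query }

	type Query {
		hello: String!
	}
`

type resolver struct{}

func (*resolver) Hello() string { return "world" }

func main() {
	// OpenTracingTracer opens a span per request and per non-trivial field;
	// swap in trace.NoopTracer{} to turn tracing off entirely.
	schema := graphql.MustParseSchema(schemaString, &resolver{},
		graphql.Tracer(trace.OpenTracingTracer{}))

	resp := schema.Exec(context.Background(), `{ hello }`, "", nil)
	fmt.Println(string(resp.Data), resp.Errors)
}
```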
diff --git a/vendor/github.com/graph-gophers/graphql-go/trace/validation_trace.go b/vendor/github.com/graph-gophers/graphql-go/trace/validation_trace.go
new file mode 100644
index 00000000..bce7a9a4
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/trace/validation_trace.go
@@ -0,0 +1,25 @@
+package trace
+
+import (
+ "context"
+
+ "github.com/graph-gophers/graphql-go/errors"
+)
+
+type TraceValidationFinishFunc = TraceQueryFinishFunc
+
+// Deprecated: use ValidationTracerContext.
+type ValidationTracer interface {
+ TraceValidation() TraceValidationFinishFunc
+}
+
+type ValidationTracerContext interface {
+ TraceValidation(ctx context.Context) TraceValidationFinishFunc
+}
+
+type NoopValidationTracer struct{}
+
+// Deprecated: use a Tracer which implements ValidationTracerContext.
+func (NoopValidationTracer) TraceValidation() TraceValidationFinishFunc {
+ return func(errs []*errors.QueryError) {}
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/argument.go b/vendor/github.com/graph-gophers/graphql-go/types/argument.go
new file mode 100644
index 00000000..b2681a28
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/types/argument.go
@@ -0,0 +1,44 @@
+package types
+
+// Argument is a representation of the GraphQL Argument.
+//
+// https://spec.graphql.org/draft/#sec-Language.Arguments
+type Argument struct {
+ Name Ident
+ Value Value
+}
+
+// ArgumentList is a collection of GraphQL Arguments.
+type ArgumentList []*Argument
+
+// Get returns a Value in the ArgumentList by name and reports whether it was found.
+func (l ArgumentList) Get(name string) (Value, bool) {
+ for _, arg := range l {
+ if arg.Name.Name == name {
+ return arg.Value, true
+ }
+ }
+ return nil, false
+}
+
+// MustGet returns a Value in the ArgumentList by name.
+// MustGet will panic if the argument name is not found in the ArgumentList.
+func (l ArgumentList) MustGet(name string) Value {
+ value, ok := l.Get(name)
+ if !ok {
+ panic("argument not found")
+ }
+ return value
+}
+
+type ArgumentsDefinition []*InputValueDefinition
+
+// Get returns an InputValueDefinition in the ArgumentsDefinition by name or nil if not found.
+func (a ArgumentsDefinition) Get(name string) *InputValueDefinition {
+ for _, inputValue := range a {
+ if inputValue.Name.Name == name {
+ return inputValue
+ }
+ }
+ return nil
+}
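A small sketch of the Get/MustGet contract, building the argument list by hand (the parser normally does this); Ident and PrimitiveValue are defined in types.go and value.go later in this diff:

```go
package main

import (
	"fmt"
	"text/scanner"

	"github.com/graph-gophers/graphql-go/types"
)

func main() {
	// Hand-built equivalent of the argument list (limit: 10).
	args := types.ArgumentList{
		{Name: types.Ident{Name: "limit"}, Value: &types.PrimitiveValue{Type: scanner.Int, Text: "10"}},
	}

	// Get reports whether the argument exists.
	if v, ok := args.Get("limit"); ok {
		fmt.Println("limit =", v.String()) // limit = 10
	}

	// MustGet panics if the name is missing, so reserve it for arguments
	// the schema guarantees to be present.
	fmt.Println(args.MustGet("limit").String())
}
```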
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/directive.go b/vendor/github.com/graph-gophers/graphql-go/types/directive.go
new file mode 100644
index 00000000..0f8a4b99
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/types/directive.go
@@ -0,0 +1,34 @@
+package types
+
+import "github.com/graph-gophers/graphql-go/errors"
+
+// Directive is a representation of the GraphQL Directive.
+//
+// http://spec.graphql.org/draft/#sec-Language.Directives
+type Directive struct {
+ Name Ident
+ Arguments ArgumentList
+}
+
+// DirectiveDefinition is a representation of the GraphQL DirectiveDefinition.
+//
+// http://spec.graphql.org/draft/#sec-Type-System.Directives
+type DirectiveDefinition struct {
+ Name string
+ Desc string
+ Locations []string
+ Arguments ArgumentsDefinition
+ Loc errors.Location
+}
+
+type DirectiveList []*Directive
+
+// Get returns the Directive in the DirectiveList by name or nil if not found.
+func (l DirectiveList) Get(name string) *Directive {
+ for _, d := range l {
+ if d.Name.Name == name {
+ return d
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/doc.go b/vendor/github.com/graph-gophers/graphql-go/types/doc.go
new file mode 100644
index 00000000..87caa60b
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/types/doc.go
@@ -0,0 +1,9 @@
+/*
+ Package types represents all types from the GraphQL specification in code.
+
+
+ The names of the Go types, whenever possible, match 1:1 with the names from
+ the specification.
+
+*/
+package types
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/enum.go b/vendor/github.com/graph-gophers/graphql-go/types/enum.go
new file mode 100644
index 00000000..b2c84caa
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/types/enum.go
@@ -0,0 +1,32 @@
+package types
+
+import "github.com/graph-gophers/graphql-go/errors"
+
+// EnumTypeDefinition defines a set of possible enum values.
+//
+// Like scalar types, an EnumTypeDefinition also represents a leaf value in a GraphQL type system.
+//
+// http://spec.graphql.org/draft/#sec-Enums
+type EnumTypeDefinition struct {
+ Name string
+ EnumValuesDefinition []*EnumValueDefinition
+ Desc string
+ Directives DirectiveList
+ Loc errors.Location
+}
+
+// An EnumValueDefinition is a unique value that may be serialized as a string: the name of the
+// represented value.
+//
+// http://spec.graphql.org/draft/#EnumValueDefinition
+type EnumValueDefinition struct {
+ EnumValue string
+ Directives DirectiveList
+ Desc string
+ Loc errors.Location
+}
+
+func (*EnumTypeDefinition) Kind() string { return "ENUM" }
+func (t *EnumTypeDefinition) String() string { return t.Name }
+func (t *EnumTypeDefinition) TypeName() string { return t.Name }
+func (t *EnumTypeDefinition) Description() string { return t.Desc }
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/extension.go b/vendor/github.com/graph-gophers/graphql-go/types/extension.go
new file mode 100644
index 00000000..b82ea670
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/types/extension.go
@@ -0,0 +1,13 @@
+package types
+
+import "github.com/graph-gophers/graphql-go/errors"
+
+// Extension type defines a GraphQL type extension.
+// Schemas, Objects, Inputs and Scalars can be extended.
+//
+// https://spec.graphql.org/draft/#sec-Type-System-Extensions
+type Extension struct {
+ Type NamedType
+ Directives DirectiveList
+ Loc errors.Location
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/field.go b/vendor/github.com/graph-gophers/graphql-go/types/field.go
new file mode 100644
index 00000000..ea5bca5c
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/types/field.go
@@ -0,0 +1,39 @@
+package types
+
+import "github.com/graph-gophers/graphql-go/errors"
+
+// FieldDefinition is a representation of a GraphQL FieldDefinition.
+//
+// http://spec.graphql.org/draft/#FieldDefinition
+type FieldDefinition struct {
+ Name string
+ Arguments ArgumentsDefinition
+ Type Type
+ Directives DirectiveList
+ Desc string
+ Loc errors.Location
+}
+
+// FieldsDefinition is a list of an ObjectTypeDefinition's Fields.
+//
+// https://spec.graphql.org/draft/#FieldsDefinition
+type FieldsDefinition []*FieldDefinition
+
+// Get returns a FieldDefinition in a FieldsDefinition by name or nil if not found.
+func (l FieldsDefinition) Get(name string) *FieldDefinition {
+ for _, f := range l {
+ if f.Name == name {
+ return f
+ }
+ }
+ return nil
+}
+
+// Names returns a slice of FieldDefinition names.
+func (l FieldsDefinition) Names() []string {
+ names := make([]string, len(l))
+ for i, f := range l {
+ names[i] = f.Name
+ }
+ return names
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/fragment.go b/vendor/github.com/graph-gophers/graphql-go/types/fragment.go
new file mode 100644
index 00000000..606219ca
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/types/fragment.go
@@ -0,0 +1,51 @@
+package types
+
+import "github.com/graph-gophers/graphql-go/errors"
+
+type Fragment struct {
+ On TypeName
+ Selections SelectionSet
+}
+
+// InlineFragment is a representation of the GraphQL InlineFragment.
+//
+// http://spec.graphql.org/draft/#InlineFragment
+type InlineFragment struct {
+ Fragment
+ Directives DirectiveList
+ Loc errors.Location
+}
+
+// FragmentDefinition is a representation of the GraphQL FragmentDefinition.
+//
+// http://spec.graphql.org/draft/#FragmentDefinition
+type FragmentDefinition struct {
+ Fragment
+ Name Ident
+ Directives DirectiveList
+ Loc errors.Location
+}
+
+// FragmentSpread is a representation of the GraphQL FragmentSpread.
+//
+// http://spec.graphql.org/draft/#FragmentSpread
+type FragmentSpread struct {
+ Name Ident
+ Directives DirectiveList
+ Loc errors.Location
+}
+
+type FragmentList []*FragmentDefinition
+
+// Get returns a FragmentDefinition by name or nil if not found.
+func (l FragmentList) Get(name string) *FragmentDefinition {
+ for _, f := range l {
+ if f.Name.Name == name {
+ return f
+ }
+ }
+ return nil
+}
+
+func (InlineFragment) isSelection() {}
+func (FragmentSpread) isSelection() {}
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/input.go b/vendor/github.com/graph-gophers/graphql-go/types/input.go
new file mode 100644
index 00000000..c179bc3e
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/types/input.go
@@ -0,0 +1,47 @@
+package types
+
+import "github.com/graph-gophers/graphql-go/errors"
+
+// InputValueDefinition is a representation of the GraphQL InputValueDefinition.
+//
+// http://spec.graphql.org/draft/#InputValueDefinition
+type InputValueDefinition struct {
+ Name Ident
+ Type Type
+ Default Value
+ Desc string
+ Directives DirectiveList
+ Loc errors.Location
+ TypeLoc errors.Location
+}
+
+type InputValueDefinitionList []*InputValueDefinition
+
+// Get returns an InputValueDefinition by name or nil if not found.
+func (l InputValueDefinitionList) Get(name string) *InputValueDefinition {
+ for _, v := range l {
+ if v.Name.Name == name {
+ return v
+ }
+ }
+ return nil
+}
+
+// InputObject types define a set of input fields; the input fields are either scalars, enums, or
+// other input objects.
+//
+// This allows arguments to accept arbitrarily complex structs.
+//
+// http://spec.graphql.org/draft/#sec-Input-Objects
+type InputObject struct {
+ Name string
+ Desc string
+ Values ArgumentsDefinition
+ Directives DirectiveList
+ Loc errors.Location
+}
+
+func (*InputObject) Kind() string { return "INPUT_OBJECT" }
+func (t *InputObject) String() string { return t.Name }
+func (t *InputObject) TypeName() string { return t.Name }
+func (t *InputObject) Description() string { return t.Desc }
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/interface.go b/vendor/github.com/graph-gophers/graphql-go/types/interface.go
new file mode 100644
index 00000000..e741e591
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/types/interface.go
@@ -0,0 +1,25 @@
+package types
+
+import "github.com/graph-gophers/graphql-go/errors"
+
+// InterfaceTypeDefinition recursively defines a list of named fields with their arguments via the
+// implementation chain of interfaces.
+//
+// GraphQL objects can then implement these interfaces which requires that the object type will
+// define all fields defined by those interfaces.
+//
+// http://spec.graphql.org/draft/#sec-Interfaces
+type InterfaceTypeDefinition struct {
+ Name string
+ PossibleTypes []*ObjectTypeDefinition
+ Fields FieldsDefinition
+ Desc string
+ Directives DirectiveList
+ Loc errors.Location
+ Interfaces []*InterfaceTypeDefinition
+}
+
+func (*InterfaceTypeDefinition) Kind() string { return "INTERFACE" }
+func (t *InterfaceTypeDefinition) String() string { return t.Name }
+func (t *InterfaceTypeDefinition) TypeName() string { return t.Name }
+func (t *InterfaceTypeDefinition) Description() string { return t.Desc }
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/object.go b/vendor/github.com/graph-gophers/graphql-go/types/object.go
new file mode 100644
index 00000000..e65c79db
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/types/object.go
@@ -0,0 +1,25 @@
+package types
+
+import "github.com/graph-gophers/graphql-go/errors"
+
+// ObjectTypeDefinition represents a GraphQL ObjectTypeDefinition.
+//
+// type FooObject {
+// foo: String
+// }
+//
+// https://spec.graphql.org/draft/#sec-Objects
+type ObjectTypeDefinition struct {
+ Name string
+ Interfaces []*InterfaceTypeDefinition
+ Fields FieldsDefinition
+ Desc string
+ Directives DirectiveList
+ InterfaceNames []string
+ Loc errors.Location
+}
+
+func (*ObjectTypeDefinition) Kind() string { return "OBJECT" }
+func (t *ObjectTypeDefinition) String() string { return t.Name }
+func (t *ObjectTypeDefinition) TypeName() string { return t.Name }
+func (t *ObjectTypeDefinition) Description() string { return t.Desc }
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/query.go b/vendor/github.com/graph-gophers/graphql-go/types/query.go
new file mode 100644
index 00000000..caca6ef4
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/types/query.go
@@ -0,0 +1,62 @@
+package types
+
+import "github.com/graph-gophers/graphql-go/errors"
+
+// ExecutableDefinition represents a set of operations or fragments that can be executed
+// against a schema.
+//
+// http://spec.graphql.org/draft/#ExecutableDefinition
+type ExecutableDefinition struct {
+ Operations OperationList
+ Fragments FragmentList
+}
+
+// OperationDefinition represents a GraphQL Operation.
+//
+// https://spec.graphql.org/draft/#sec-Language.Operations
+type OperationDefinition struct {
+ Type OperationType
+ Name Ident
+ Vars ArgumentsDefinition
+ Selections SelectionSet
+ Directives DirectiveList
+ Loc errors.Location
+}
+
+type OperationType string
+
+// A Selection is a field requested in a GraphQL operation.
+//
+// http://spec.graphql.org/draft/#Selection
+type Selection interface {
+ isSelection()
+}
+
+// A SelectionSet represents a collection of Selections
+//
+// http://spec.graphql.org/draft/#sec-Selection-Sets
+type SelectionSet []Selection
+
+// Field represents a field used in a query.
+type Field struct {
+ Alias Ident
+ Name Ident
+ Arguments ArgumentList
+ Directives DirectiveList
+ SelectionSet SelectionSet
+ SelectionSetLoc errors.Location
+}
+
+func (Field) isSelection() {}
+
+type OperationList []*OperationDefinition
+
+// Get returns an OperationDefinition by name or nil if not found.
+func (l OperationList) Get(name string) *OperationDefinition {
+ for _, f := range l {
+ if f.Name.Name == name {
+ return f
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/scalar.go b/vendor/github.com/graph-gophers/graphql-go/types/scalar.go
new file mode 100644
index 00000000..5bd529a8
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/types/scalar.go
@@ -0,0 +1,22 @@
+package types
+
+import "github.com/graph-gophers/graphql-go/errors"
+
+// ScalarTypeDefinition types represent primitive leaf values (e.g. a string or an integer) in a GraphQL type
+// system.
+//
+// GraphQL responses take the form of a hierarchical tree; the leaves on these trees are GraphQL
+// scalars.
+//
+// http://spec.graphql.org/draft/#sec-Scalars
+type ScalarTypeDefinition struct {
+ Name string
+ Desc string
+ Directives DirectiveList
+ Loc errors.Location
+}
+
+func (*ScalarTypeDefinition) Kind() string { return "SCALAR" }
+func (t *ScalarTypeDefinition) String() string { return t.Name }
+func (t *ScalarTypeDefinition) TypeName() string { return t.Name }
+func (t *ScalarTypeDefinition) Description() string { return t.Desc }
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/schema.go b/vendor/github.com/graph-gophers/graphql-go/types/schema.go
new file mode 100644
index 00000000..06811a97
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/types/schema.go
@@ -0,0 +1,42 @@
+package types
+
+// Schema represents a GraphQL service's collective type system capabilities.
+// A schema is defined in terms of the types and directives it supports as well as the root
+// operation types for each kind of operation: `query`, `mutation`, and `subscription`.
+//
+// For a more formal definition, read the relevant section in the specification:
+//
+// http://spec.graphql.org/draft/#sec-Schema
+type Schema struct {
+ // EntryPoints determines the place in the type system where `query`, `mutation`, and
+ // `subscription` operations begin.
+ //
+ // http://spec.graphql.org/draft/#sec-Root-Operation-Types
+ //
+ EntryPoints map[string]NamedType
+
+ // Types are the fundamental unit of any GraphQL schema.
+ // There are six kinds of named types, and two wrapping types.
+ //
+ // http://spec.graphql.org/draft/#sec-Types
+ Types map[string]NamedType
+
+ // Directives are used to annotate various parts of a GraphQL document as an indicator that they
+ // should be evaluated differently by a validator, executor, or client tool such as a code
+ // generator.
+ //
+ // http://spec.graphql.org/#sec-Type-System.Directives
+ Directives map[string]*DirectiveDefinition
+
+ UseFieldResolvers bool
+
+ EntryPointNames map[string]string
+ Objects []*ObjectTypeDefinition
+ Unions []*Union
+ Enums []*EnumTypeDefinition
+ Extensions []*Extension
+}
+
+func (s *Schema) Resolve(name string) Type {
+ return s.Types[name]
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/types.go b/vendor/github.com/graph-gophers/graphql-go/types/types.go
new file mode 100644
index 00000000..df34d08a
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/types/types.go
@@ -0,0 +1,63 @@
+package types
+
+import (
+ "github.com/graph-gophers/graphql-go/errors"
+)
+
+// TypeName is a base building block for GraphQL type references.
+type TypeName struct {
+ Ident
+}
+
+// NamedType represents a type with a name.
+//
+// http://spec.graphql.org/draft/#NamedType
+type NamedType interface {
+ Type
+ TypeName() string
+ Description() string
+}
+
+type Ident struct {
+ Name string
+ Loc errors.Location
+}
+
+type Type interface {
+ // Kind returns one possible GraphQL type kind. A type kind must be
+ // valid as defined by the GraphQL spec.
+ //
+ // https://spec.graphql.org/draft/#sec-Type-Kinds
+ Kind() string
+
+ // String serializes a Type into a GraphQL specification format type.
+ //
+ // http://spec.graphql.org/draft/#sec-Serialization-Format
+ String() string
+}
+
+// List represents a GraphQL ListType.
+//
+// http://spec.graphql.org/draft/#ListType
+type List struct {
+ // OfType represents the inner-type of a List type.
+ // For example, the List type `[Foo]` has an OfType of Foo.
+ OfType Type
+}
+
+// NonNull represents a GraphQL NonNullType.
+//
+// https://spec.graphql.org/draft/#NonNullType
+type NonNull struct {
+ // OfType represents the inner-type of a NonNull type.
+ // For example, the NonNull type `Foo!` has an OfType of Foo.
+ OfType Type
+}
+
+func (*List) Kind() string { return "LIST" }
+func (*NonNull) Kind() string { return "NON_NULL" }
+func (*TypeName) Kind() string { panic("TypeName needs to be resolved to actual type") }
+
+func (t *List) String() string { return "[" + t.OfType.String() + "]" }
+func (t *NonNull) String() string { return t.OfType.String() + "!" }
+func (*TypeName) String() string { panic("TypeName needs to be resolved to actual type") }
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/union.go b/vendor/github.com/graph-gophers/graphql-go/types/union.go
new file mode 100644
index 00000000..bb916673
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/types/union.go
@@ -0,0 +1,24 @@
+package types
+
+import "github.com/graph-gophers/graphql-go/errors"
+
+// Union types represent objects that could be one of a list of GraphQL object types, but provide no
+// guaranteed fields between those types.
+//
+// They also differ from interfaces in that object types declare which interfaces they implement, but
+// are not aware of which unions contain them.
+//
+// http://spec.graphql.org/draft/#sec-Unions
+type Union struct {
+ Name string
+ UnionMemberTypes []*ObjectTypeDefinition
+ Desc string
+ Directives DirectiveList
+ TypeNames []string
+ Loc errors.Location
+}
+
+func (*Union) Kind() string { return "UNION" }
+func (t *Union) String() string { return t.Name }
+func (t *Union) TypeName() string { return t.Name }
+func (t *Union) Description() string { return t.Desc }
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/value.go b/vendor/github.com/graph-gophers/graphql-go/types/value.go
new file mode 100644
index 00000000..9f8d041a
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/types/value.go
@@ -0,0 +1,141 @@
+package types
+
+import (
+ "strconv"
+ "strings"
+ "text/scanner"
+
+ "github.com/graph-gophers/graphql-go/errors"
+)
+
+// Value represents a literal input or literal default value in the GraphQL Specification.
+//
+// http://spec.graphql.org/draft/#sec-Input-Values
+type Value interface {
+ // Deserialize transforms a GraphQL specification format literal into a Go type.
+ Deserialize(vars map[string]interface{}) interface{}
+
+ // String serializes a Value into a GraphQL specification format literal.
+ String() string
+ Location() errors.Location
+}
+
+// PrimitiveValue represents one of the following GraphQL scalars: Int, Float,
+// String, or Boolean
+type PrimitiveValue struct {
+ Type rune
+ Text string
+ Loc errors.Location
+}
+
+func (val *PrimitiveValue) Deserialize(vars map[string]interface{}) interface{} {
+ switch val.Type {
+ case scanner.Int:
+ value, err := strconv.ParseInt(val.Text, 10, 32)
+ if err != nil {
+ panic(err)
+ }
+ return int32(value)
+
+ case scanner.Float:
+ value, err := strconv.ParseFloat(val.Text, 64)
+ if err != nil {
+ panic(err)
+ }
+ return value
+
+ case scanner.String:
+ value, err := strconv.Unquote(val.Text)
+ if err != nil {
+ panic(err)
+ }
+ return value
+
+ case scanner.Ident:
+ switch val.Text {
+ case "true":
+ return true
+ case "false":
+ return false
+ default:
+ return val.Text
+ }
+
+ default:
+ panic("invalid literal value")
+ }
+}
+
+func (val *PrimitiveValue) String() string { return val.Text }
+func (val *PrimitiveValue) Location() errors.Location { return val.Loc }
+
+// ListValue represents a literal list Value in the GraphQL specification.
+//
+// http://spec.graphql.org/draft/#sec-List-Value
+type ListValue struct {
+ Values []Value
+ Loc errors.Location
+}
+
+func (val *ListValue) Deserialize(vars map[string]interface{}) interface{} {
+ entries := make([]interface{}, len(val.Values))
+ for i, entry := range val.Values {
+ entries[i] = entry.Deserialize(vars)
+ }
+ return entries
+}
+
+func (val *ListValue) String() string {
+ entries := make([]string, len(val.Values))
+ for i, entry := range val.Values {
+ entries[i] = entry.String()
+ }
+ return "[" + strings.Join(entries, ", ") + "]"
+}
+
+func (val *ListValue) Location() errors.Location { return val.Loc }
+
+// ObjectValue represents a literal object Value in the GraphQL specification.
+//
+// http://spec.graphql.org/draft/#sec-Object-Value
+type ObjectValue struct {
+ Fields []*ObjectField
+ Loc errors.Location
+}
+
+// ObjectField represents field/value pairs in a literal ObjectValue.
+type ObjectField struct {
+ Name Ident
+ Value Value
+}
+
+func (val *ObjectValue) Deserialize(vars map[string]interface{}) interface{} {
+ fields := make(map[string]interface{}, len(val.Fields))
+ for _, f := range val.Fields {
+ fields[f.Name.Name] = f.Value.Deserialize(vars)
+ }
+ return fields
+}
+
+func (val *ObjectValue) String() string {
+ entries := make([]string, 0, len(val.Fields))
+ for _, f := range val.Fields {
+ entries = append(entries, f.Name.Name+": "+f.Value.String())
+ }
+ return "{" + strings.Join(entries, ", ") + "}"
+}
+
+func (val *ObjectValue) Location() errors.Location {
+ return val.Loc
+}
+
+// NullValue represents a literal `null` Value in the GraphQL specification.
+//
+// http://spec.graphql.org/draft/#sec-Null-Value
+type NullValue struct {
+ Loc errors.Location
+}
+
+func (val *NullValue) Deserialize(vars map[string]interface{}) interface{} { return nil }
+func (val *NullValue) String() string { return "null" }
+func (val *NullValue) Location() errors.Location { return val.Loc }
diff --git a/vendor/github.com/graph-gophers/graphql-go/types/variable.go b/vendor/github.com/graph-gophers/graphql-go/types/variable.go
new file mode 100644
index 00000000..1a4e2a51
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/types/variable.go
@@ -0,0 +1,15 @@
+package types
+
+import "github.com/graph-gophers/graphql-go/errors"
+
+// Variable is used in GraphQL operations to parameterize an input value.
+//
+// http://spec.graphql.org/draft/#Variable
+type Variable struct {
+ Name string
+ Loc errors.Location
+}
+
+func (v Variable) Deserialize(vars map[string]interface{}) interface{} { return vars[v.Name] }
+func (v Variable) String() string { return "$" + v.Name }
+func (v *Variable) Location() errors.Location { return v.Loc }
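For context, a hand-built sketch of how these literal Value implementations deserialize and serialize; the parser normally constructs them from a query document:

```go
package main

import (
	"fmt"
	"text/scanner"

	"github.com/graph-gophers/graphql-go/types"
)

func main() {
	// Hand-built equivalent of the literal [42, $limit, null].
	list := &types.ListValue{Values: []types.Value{
		&types.PrimitiveValue{Type: scanner.Int, Text: "42"},
		&types.Variable{Name: "limit"},
		&types.NullValue{},
	}}

	vars := map[string]interface{}{"limit": int32(10)}
	fmt.Println(list.Deserialize(vars)) // [42 10 <nil>]
	fmt.Println(list.String())          // [42, $limit, null]
}
```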
diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md
index 465f4b77..bc2f98f0 100644
--- a/vendor/github.com/klauspost/cpuid/v2/README.md
+++ b/vendor/github.com/klauspost/cpuid/v2/README.md
@@ -39,10 +39,10 @@ func main() {
fmt.Println("ThreadsPerCore:", CPU.ThreadsPerCore)
fmt.Println("LogicalCores:", CPU.LogicalCores)
fmt.Println("Family", CPU.Family, "Model:", CPU.Model, "Vendor ID:", CPU.VendorID)
- fmt.Println("Features:", fmt.Sprintf(strings.Join(CPU.FeatureSet(), ",")))
+ fmt.Println("Features:", strings.Join(CPU.FeatureSet(), ","))
fmt.Println("Cacheline bytes:", CPU.CacheLine)
fmt.Println("L1 Data Cache:", CPU.Cache.L1D, "bytes")
- fmt.Println("L1 Instruction Cache:", CPU.Cache.L1D, "bytes")
+ fmt.Println("L1 Instruction Cache:", CPU.Cache.L1I, "bytes")
fmt.Println("L2 Cache:", CPU.Cache.L2, "bytes")
fmt.Println("L3 Cache:", CPU.Cache.L3, "bytes")
fmt.Println("Frequency", CPU.Hz, "hz")
diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
index 1d88736b..3d543ce9 100644
--- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go
+++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
@@ -95,10 +95,13 @@ const (
AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one.
BMI1 // Bit Manipulation Instruction Set 1
BMI2 // Bit Manipulation Instruction Set 2
+ CETIBT // Intel CET Indirect Branch Tracking
+ CETSS // Intel CET Shadow Stack
CLDEMOTE // Cache Line Demote
CLMUL // Carry-less Multiplication
CLZERO // CLZERO instruction supported
CMOV // i686 CMOV
+ CMPXCHG8 // CMPXCHG8 instruction
CPBOOST // Core Performance Boost
CX16 // CMPXCHG16B Instruction
ENQCMD // Enqueue Command
@@ -106,6 +109,8 @@ const (
F16C // Half-precision floating-point conversion
FMA3 // Intel FMA 3. Does not imply AVX.
FMA4 // Bulldozer FMA4 functions
+ FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9
+ FXSROPT // FXSAVE/FXRSTOR optimizations
GFNI // Galois Field New Instructions
HLE // Hardware Lock Elision
HTT // Hyperthreading (enabled)
@@ -123,16 +128,19 @@ const (
IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD)
INT_WBINVD // WBINVD/WBNOINVD are interruptible.
INVLPGB // NVLPGB and TLBSYNC instruction supported
+ LAHF // LAHF/SAHF in long mode
LZCNT // LZCNT instruction
MCAOVERFLOW // MCA overflow recovery support.
MCOMMIT // MCOMMIT instruction supported
MMX // standard MMX
MMXEXT // SSE integer functions or AMD MMX ext
+ MOVBE // MOVBE instruction (big-endian)
MOVDIR64B // Move 64 Bytes as Direct Store
MOVDIRI // Move Doubleword as Direct Store
MPX // Intel MPX (Memory Protection Extensions)
MSRIRC // Instruction Retired Counter MSR available
NX // NX (No-Execute) bit
+ OSXSAVE // XSAVE enabled by OS
POPCNT // POPCNT instruction
RDPRU // RDPRU instruction supported
RDRAND // RDRAND instruction is available
@@ -140,6 +148,7 @@ const (
RDTSCP // RDTSCP Instruction
RTM // Restricted Transactional Memory
RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort.
+ SCE // SYSENTER and SYSEXIT instructions
SERIALIZE // Serialize Instruction Execution
SGX // Software Guard Extensions
SGXLC // Software Guard Extensions Launch Control
@@ -160,7 +169,9 @@ const (
VPCLMULQDQ // Carry-Less Multiplication Quadword
WAITPKG // TPAUSE, UMONITOR, UMWAIT
WBNOINVD // Write Back and Do Not Invalidate Cache
+ X87 // FPU
XOP // Bulldozer XOP functions
+ XSAVE // XSAVE, XRESTOR, XSETBV, XGETBV
// ARM features:
AESARM // AES instructions
@@ -311,6 +322,31 @@ func (c CPUInfo) Has(id FeatureID) bool {
return c.featureSet.inSet(id)
}
+// https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
+var level1Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2)
+var level2Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3)
+var level3Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE)
+var level4Features = flagSetWith(CMOV, CMPXCHG8, X87, FXSR, MMX, SCE, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL)
+
+// X64Level returns the microarchitecture level detected on the CPU.
+// If features are lacking, or the CPU is not in x64 mode, 0 is returned.
+// See https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
+func (c CPUInfo) X64Level() int {
+ if c.featureSet.hasSet(level4Features) {
+ return 4
+ }
+ if c.featureSet.hasSet(level3Features) {
+ return 3
+ }
+ if c.featureSet.hasSet(level2Features) {
+ return 2
+ }
+ if c.featureSet.hasSet(level1Features) {
+ return 1
+ }
+ return 0
+}
+
// Disable will disable one or several features.
func (c *CPUInfo) Disable(ids ...FeatureID) bool {
for _, id := range ids {
@@ -335,9 +371,7 @@ func (c CPUInfo) IsVendor(v Vendor) bool {
func (c CPUInfo) FeatureSet() []string {
s := make([]string, 0)
- for _, f := range c.featureSet.Strings() {
- s = append(s, f)
- }
+ s = append(s, c.featureSet.Strings()...)
return s
}
@@ -499,6 +533,24 @@ func (s *flagSet) or(other flagSet) {
}
}
+// hasSet returns whether all features are present.
+func (s flagSet) hasSet(other flagSet) bool {
+ for i, v := range other[:] {
+ if s[i]&v != v {
+ return false
+ }
+ }
+ return true
+}
+
+func flagSetWith(feat ...FeatureID) flagSet {
+ var res flagSet
+ for _, f := range feat {
+ res.set(f)
+ }
+ return res
+}
+
// ParseFeature will parse the string and return the ID of the matching feature.
// Will return UNKNOWN if not found.
func ParseFeature(s string) FeatureID {
@@ -708,6 +760,7 @@ func (c *CPUInfo) cacheSize() {
if maxFunctionID() < 4 {
return
}
+ c.Cache.L1I, c.Cache.L1D, c.Cache.L2, c.Cache.L3 = 0, 0, 0, 0
for i := uint32(0); ; i++ {
eax, ebx, ecx, _ := cpuidex(4, i)
cacheType := eax & 15
@@ -800,8 +853,6 @@ func (c *CPUInfo) cacheSize() {
}
}
}
-
- return
}
type SGXEPCSection struct {
@@ -865,9 +916,14 @@ func support() flagSet {
family, model := familyModel()
_, _, c, d := cpuid(1)
+ fs.setIf((d&(1<<0)) != 0, X87)
+ fs.setIf((d&(1<<8)) != 0, CMPXCHG8)
+ fs.setIf((d&(1<<11)) != 0, SCE)
fs.setIf((d&(1<<15)) != 0, CMOV)
+ fs.setIf((d&(1<<22)) != 0, MMXEXT)
fs.setIf((d&(1<<23)) != 0, MMX)
- fs.setIf((d&(1<<25)) != 0, MMXEXT)
+ fs.setIf((d&(1<<24)) != 0, FXSR)
+ fs.setIf((d&(1<<25)) != 0, FXSROPT)
fs.setIf((d&(1<<25)) != 0, SSE)
fs.setIf((d&(1<<26)) != 0, SSE2)
fs.setIf((c&1) != 0, SSE3)
@@ -877,6 +933,7 @@ func support() flagSet {
fs.setIf((c&0x00100000) != 0, SSE42)
fs.setIf((c&(1<<25)) != 0, AESNI)
fs.setIf((c&(1<<1)) != 0, CLMUL)
+ fs.setIf(c&(1<<22) != 0, MOVBE)
fs.setIf(c&(1<<23) != 0, POPCNT)
fs.setIf(c&(1<<30) != 0, RDRAND)
@@ -892,6 +949,8 @@ func support() flagSet {
if vend == AMD && (d&(1<<28)) != 0 && mfi >= 4 {
fs.setIf(threadsPerCore() > 1, HTT)
}
+ fs.setIf(c&1<<26 != 0, XSAVE)
+ fs.setIf(c&1<<27 != 0, OSXSAVE)
// Check XGETBV/XSAVE (26), OXSAVE (27) and AVX (28) bits
const avxCheck = 1<<26 | 1<<27 | 1<<28
if c&avxCheck == avxCheck {
@@ -936,6 +995,7 @@ func support() flagSet {
fs.setIf(ebx&(1<<29) != 0, SHA)
// CPUID.(EAX=7, ECX=0).ECX
fs.setIf(ecx&(1<<5) != 0, WAITPKG)
+ fs.setIf(ecx&(1<<7) != 0, CETSS)
fs.setIf(ecx&(1<<25) != 0, CLDEMOTE)
fs.setIf(ecx&(1<<27) != 0, MOVDIRI)
fs.setIf(ecx&(1<<28) != 0, MOVDIR64B)
@@ -945,6 +1005,7 @@ func support() flagSet {
fs.setIf(edx&(1<<11) != 0, RTM_ALWAYS_ABORT)
fs.setIf(edx&(1<<14) != 0, SERIALIZE)
fs.setIf(edx&(1<<16) != 0, TSXLDTRK)
+ fs.setIf(edx&(1<<20) != 0, CETIBT)
fs.setIf(edx&(1<<26) != 0, IBPB)
fs.setIf(edx&(1<<27) != 0, STIBP)
@@ -996,6 +1057,7 @@ func support() flagSet {
fs.set(LZCNT)
fs.set(POPCNT)
}
+ fs.setIf((c&(1<<0)) != 0, LAHF)
fs.setIf((c&(1<<10)) != 0, IBS)
fs.setIf((d&(1<<31)) != 0, AMD3DNOW)
fs.setIf((d&(1<<30)) != 0, AMD3DNOWEXT)
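The X64Level helper added above can be read from the package-level CPU value; a minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	// 0 means the detected feature set does not reach x86-64-v1 (for example
	// on non-x86 builds); 1 through 4 map to x86-64-v1 .. x86-64-v4.
	fmt.Println("x86-64 microarchitecture level:", cpuid.CPU.X64Level())
}
```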
diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go
index 9bf9f77f..9a53504a 100644
--- a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go
+++ b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go
@@ -1,6 +1,7 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
-//+build arm64,!gccgo,!noasm,!appengine
+//go:build arm64 && !gccgo && !noasm && !appengine
+// +build arm64,!gccgo,!noasm,!appengine
package cpuid
diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_ref.go b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go
index e9c8606a..9636c2bc 100644
--- a/vendor/github.com/klauspost/cpuid/v2/detect_ref.go
+++ b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go
@@ -1,6 +1,7 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
-//+build !amd64,!386,!arm64 gccgo noasm appengine
+//go:build (!amd64 && !386 && !arm64) || gccgo || noasm || appengine
+// +build !amd64,!386,!arm64 gccgo noasm appengine
package cpuid
diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
index 367c35c8..35678d8a 100644
--- a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
+++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
@@ -1,6 +1,7 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
-//+build 386,!gccgo,!noasm,!appengine amd64,!gccgo,!noasm,!appengine
+//go:build (386 && !gccgo && !noasm && !appengine) || (amd64 && !gccgo && !noasm && !appengine)
+// +build 386,!gccgo,!noasm,!appengine amd64,!gccgo,!noasm,!appengine
package cpuid
diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
index b1fe42e4..02fe232a 100644
--- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
+++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
@@ -36,103 +36,114 @@ func _() {
_ = x[AVXSLOW-26]
_ = x[BMI1-27]
_ = x[BMI2-28]
- _ = x[CLDEMOTE-29]
- _ = x[CLMUL-30]
- _ = x[CLZERO-31]
- _ = x[CMOV-32]
- _ = x[CPBOOST-33]
- _ = x[CX16-34]
- _ = x[ENQCMD-35]
- _ = x[ERMS-36]
- _ = x[F16C-37]
- _ = x[FMA3-38]
- _ = x[FMA4-39]
- _ = x[GFNI-40]
- _ = x[HLE-41]
- _ = x[HTT-42]
- _ = x[HWA-43]
- _ = x[HYPERVISOR-44]
- _ = x[IBPB-45]
- _ = x[IBS-46]
- _ = x[IBSBRNTRGT-47]
- _ = x[IBSFETCHSAM-48]
- _ = x[IBSFFV-49]
- _ = x[IBSOPCNT-50]
- _ = x[IBSOPCNTEXT-51]
- _ = x[IBSOPSAM-52]
- _ = x[IBSRDWROPCNT-53]
- _ = x[IBSRIPINVALIDCHK-54]
- _ = x[INT_WBINVD-55]
- _ = x[INVLPGB-56]
- _ = x[LZCNT-57]
- _ = x[MCAOVERFLOW-58]
- _ = x[MCOMMIT-59]
- _ = x[MMX-60]
- _ = x[MMXEXT-61]
- _ = x[MOVDIR64B-62]
- _ = x[MOVDIRI-63]
- _ = x[MPX-64]
- _ = x[MSRIRC-65]
- _ = x[NX-66]
- _ = x[POPCNT-67]
- _ = x[RDPRU-68]
- _ = x[RDRAND-69]
- _ = x[RDSEED-70]
- _ = x[RDTSCP-71]
- _ = x[RTM-72]
- _ = x[RTM_ALWAYS_ABORT-73]
- _ = x[SERIALIZE-74]
- _ = x[SGX-75]
- _ = x[SGXLC-76]
- _ = x[SHA-77]
- _ = x[SSE-78]
- _ = x[SSE2-79]
- _ = x[SSE3-80]
- _ = x[SSE4-81]
- _ = x[SSE42-82]
- _ = x[SSE4A-83]
- _ = x[SSSE3-84]
- _ = x[STIBP-85]
- _ = x[SUCCOR-86]
- _ = x[TBM-87]
- _ = x[TSXLDTRK-88]
- _ = x[VAES-89]
- _ = x[VMX-90]
- _ = x[VPCLMULQDQ-91]
- _ = x[WAITPKG-92]
- _ = x[WBNOINVD-93]
- _ = x[XOP-94]
- _ = x[AESARM-95]
- _ = x[ARMCPUID-96]
- _ = x[ASIMD-97]
- _ = x[ASIMDDP-98]
- _ = x[ASIMDHP-99]
- _ = x[ASIMDRDM-100]
- _ = x[ATOMICS-101]
- _ = x[CRC32-102]
- _ = x[DCPOP-103]
- _ = x[EVTSTRM-104]
- _ = x[FCMA-105]
- _ = x[FP-106]
- _ = x[FPHP-107]
- _ = x[GPA-108]
- _ = x[JSCVT-109]
- _ = x[LRCPC-110]
- _ = x[PMULL-111]
- _ = x[SHA1-112]
- _ = x[SHA2-113]
- _ = x[SHA3-114]
- _ = x[SHA512-115]
- _ = x[SM3-116]
- _ = x[SM4-117]
- _ = x[SVE-118]
- _ = x[lastID-119]
+ _ = x[CETIBT-29]
+ _ = x[CETSS-30]
+ _ = x[CLDEMOTE-31]
+ _ = x[CLMUL-32]
+ _ = x[CLZERO-33]
+ _ = x[CMOV-34]
+ _ = x[CMPXCHG8-35]
+ _ = x[CPBOOST-36]
+ _ = x[CX16-37]
+ _ = x[ENQCMD-38]
+ _ = x[ERMS-39]
+ _ = x[F16C-40]
+ _ = x[FMA3-41]
+ _ = x[FMA4-42]
+ _ = x[FXSR-43]
+ _ = x[FXSROPT-44]
+ _ = x[GFNI-45]
+ _ = x[HLE-46]
+ _ = x[HTT-47]
+ _ = x[HWA-48]
+ _ = x[HYPERVISOR-49]
+ _ = x[IBPB-50]
+ _ = x[IBS-51]
+ _ = x[IBSBRNTRGT-52]
+ _ = x[IBSFETCHSAM-53]
+ _ = x[IBSFFV-54]
+ _ = x[IBSOPCNT-55]
+ _ = x[IBSOPCNTEXT-56]
+ _ = x[IBSOPSAM-57]
+ _ = x[IBSRDWROPCNT-58]
+ _ = x[IBSRIPINVALIDCHK-59]
+ _ = x[INT_WBINVD-60]
+ _ = x[INVLPGB-61]
+ _ = x[LAHF-62]
+ _ = x[LZCNT-63]
+ _ = x[MCAOVERFLOW-64]
+ _ = x[MCOMMIT-65]
+ _ = x[MMX-66]
+ _ = x[MMXEXT-67]
+ _ = x[MOVBE-68]
+ _ = x[MOVDIR64B-69]
+ _ = x[MOVDIRI-70]
+ _ = x[MPX-71]
+ _ = x[MSRIRC-72]
+ _ = x[NX-73]
+ _ = x[OSXSAVE-74]
+ _ = x[POPCNT-75]
+ _ = x[RDPRU-76]
+ _ = x[RDRAND-77]
+ _ = x[RDSEED-78]
+ _ = x[RDTSCP-79]
+ _ = x[RTM-80]
+ _ = x[RTM_ALWAYS_ABORT-81]
+ _ = x[SCE-82]
+ _ = x[SERIALIZE-83]
+ _ = x[SGX-84]
+ _ = x[SGXLC-85]
+ _ = x[SHA-86]
+ _ = x[SSE-87]
+ _ = x[SSE2-88]
+ _ = x[SSE3-89]
+ _ = x[SSE4-90]
+ _ = x[SSE42-91]
+ _ = x[SSE4A-92]
+ _ = x[SSSE3-93]
+ _ = x[STIBP-94]
+ _ = x[SUCCOR-95]
+ _ = x[TBM-96]
+ _ = x[TSXLDTRK-97]
+ _ = x[VAES-98]
+ _ = x[VMX-99]
+ _ = x[VPCLMULQDQ-100]
+ _ = x[WAITPKG-101]
+ _ = x[WBNOINVD-102]
+ _ = x[X87-103]
+ _ = x[XOP-104]
+ _ = x[XSAVE-105]
+ _ = x[AESARM-106]
+ _ = x[ARMCPUID-107]
+ _ = x[ASIMD-108]
+ _ = x[ASIMDDP-109]
+ _ = x[ASIMDHP-110]
+ _ = x[ASIMDRDM-111]
+ _ = x[ATOMICS-112]
+ _ = x[CRC32-113]
+ _ = x[DCPOP-114]
+ _ = x[EVTSTRM-115]
+ _ = x[FCMA-116]
+ _ = x[FP-117]
+ _ = x[FPHP-118]
+ _ = x[GPA-119]
+ _ = x[JSCVT-120]
+ _ = x[LRCPC-121]
+ _ = x[PMULL-122]
+ _ = x[SHA1-123]
+ _ = x[SHA2-124]
+ _ = x[SHA3-125]
+ _ = x[SHA512-126]
+ _ = x[SM3-127]
+ _ = x[SM4-128]
+ _ = x[SVE-129]
+ _ = x[lastID-130]
_ = x[firstID-0]
}
-const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWBMI1BMI2CLDEMOTECLMULCLZEROCMOVCPBOOSTCX16ENQCMDERMSF16CFMA3FMA4GFNIHLEHTTHWAHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKINT_WBINVDINVLPGBLZCNTMCAOVERFLOWMCOMMITMMXMMXEXTMOVDIR64BMOVDIRIMPXMSRIRCNXPOPCNTRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSERIALIZESGXSGXLCSHASSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSUCCORTBMTSXLDTRKVAESVMXVPCLMULQDQWAITPKGWBNOINVDXOPAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
+const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPXCHG8CPBOOSTCX16ENQCMDERMSF16CFMA3FMA4FXSRFXSROPTGFNIHLEHTTHWAHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKINT_WBINVDINVLPGBLAHFLZCNTMCAOVERFLOWMCOMMITMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMPXMSRIRCNXOSXSAVEPOPCNTRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSCESERIALIZESGXSGXLCSHASSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSUCCORTBMTSXLDTRKVAESVMXVPCLMULQDQWAITPKGWBNOINVDX87XOPXSAVEAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
-var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 143, 151, 161, 172, 180, 190, 208, 223, 230, 234, 238, 246, 251, 257, 261, 268, 272, 278, 282, 286, 290, 294, 298, 301, 304, 307, 317, 321, 324, 334, 345, 351, 359, 370, 378, 390, 406, 416, 423, 428, 439, 446, 449, 455, 464, 471, 474, 480, 482, 488, 493, 499, 505, 511, 514, 530, 539, 542, 547, 550, 553, 557, 561, 565, 570, 575, 580, 585, 591, 594, 602, 606, 609, 619, 626, 634, 637, 643, 651, 656, 663, 670, 678, 685, 690, 695, 702, 706, 708, 712, 715, 720, 725, 730, 734, 738, 742, 748, 751, 754, 757, 763}
+var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 143, 151, 161, 172, 180, 190, 208, 223, 230, 234, 238, 244, 249, 257, 262, 268, 272, 280, 287, 291, 297, 301, 305, 309, 313, 317, 324, 328, 331, 334, 337, 347, 351, 354, 364, 375, 381, 389, 400, 408, 420, 436, 446, 453, 457, 462, 473, 480, 483, 489, 494, 503, 510, 513, 519, 521, 528, 534, 539, 545, 551, 557, 560, 576, 579, 588, 591, 596, 599, 602, 606, 610, 614, 619, 624, 629, 634, 640, 643, 651, 655, 658, 668, 675, 683, 686, 689, 694, 700, 708, 713, 720, 727, 735, 742, 747, 752, 759, 763, 765, 769, 772, 777, 782, 787, 791, 795, 799, 805, 808, 811, 814, 820}
func (i FeatureID) String() string {
if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) {
diff --git a/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go
index 1a951e6c..8733ba34 100644
--- a/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go
+++ b/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go
@@ -1,8 +1,7 @@
// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file.
-// +build arm64
-// +build !linux
-// +build !darwin
+//go:build arm64 && !linux && !darwin
+// +build arm64,!linux,!darwin
package cpuid
diff --git a/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go
index 4d0b8b46..f8f201b5 100644
--- a/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go
+++ b/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go
@@ -1,6 +1,7 @@
// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file.
-//+build nounsafe
+//go:build nounsafe
+// +build nounsafe
package cpuid
diff --git a/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go
index 32980028..92af622e 100644
--- a/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go
+++ b/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go
@@ -1,6 +1,7 @@
// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file.
-//+build !nounsafe
+//go:build !nounsafe
+// +build !nounsafe
package cpuid
diff --git a/vendor/github.com/labstack/echo/v4/CHANGELOG.md b/vendor/github.com/labstack/echo/v4/CHANGELOG.md
index 461ac89c..ba75d71f 100644
--- a/vendor/github.com/labstack/echo/v4/CHANGELOG.md
+++ b/vendor/github.com/labstack/echo/v4/CHANGELOG.md
@@ -1,5 +1,29 @@
# Changelog
+## v4.7.2 - 2022-03-16
+
+**Fixes**
+
+* Fix nil pointer exception when calling Start again after address binding error [#2131](https://github.com/labstack/echo/pull/2131)
+* Fix CSRF middleware not being able to extract token from multipart/form-data form [#2136](https://github.com/labstack/echo/pull/2136)
+* Fix Timeout middleware write race [#2126](https://github.com/labstack/echo/pull/2126)
+
+**Enhancements**
+
+* Recover middleware should not log panic for aborted handler [#2134](https://github.com/labstack/echo/pull/2134)
+
+
+## v4.7.1 - 2022-03-13
+
+**Fixes**
+
+* Fix `e.Static`, `.File()`, `c.Attachment()` being picky with paths starting with `./`, `../` and `/` after 4.7.0 introduced echo.Filesystem support (Go1.16+) [#2123](https://github.com/labstack/echo/pull/2123)
+
+**Enhancements**
+
+* Remove some unused code [#2116](https://github.com/labstack/echo/pull/2116)
+
+
## v4.7.0 - 2022-03-01
**Enhancements**
diff --git a/vendor/github.com/labstack/echo/v4/echo.go b/vendor/github.com/labstack/echo/v4/echo.go
index 143f9ffe..8829619c 100644
--- a/vendor/github.com/labstack/echo/v4/echo.go
+++ b/vendor/github.com/labstack/echo/v4/echo.go
@@ -246,7 +246,7 @@ const (
const (
// Version of Echo
- Version = "4.7.0"
+ Version = "4.7.2"
website = "https://echo.labstack.com"
// http://patorjk.com/software/taag/#p=display&f=Small%20Slant&t=Echo
banner = `
@@ -732,7 +732,7 @@ func (e *Echo) StartServer(s *http.Server) (err error) {
return s.Serve(e.Listener)
}
-func (e *Echo) configureServer(s *http.Server) (err error) {
+func (e *Echo) configureServer(s *http.Server) error {
// Setup
e.colorer.SetOutput(e.Logger.Output())
s.ErrorLog = e.StdLogger
@@ -747,10 +747,11 @@ func (e *Echo) configureServer(s *http.Server) (err error) {
if s.TLSConfig == nil {
if e.Listener == nil {
- e.Listener, err = newListener(s.Addr, e.ListenerNetwork)
+ l, err := newListener(s.Addr, e.ListenerNetwork)
if err != nil {
return err
}
+ e.Listener = l
}
if !e.HidePort {
e.colorer.Printf("⇨ http server started on %s\n", e.colorer.Green(e.Listener.Addr()))
@@ -791,7 +792,7 @@ func (e *Echo) TLSListenerAddr() net.Addr {
}
// StartH2CServer starts a custom http/2 server with h2c (HTTP/2 Cleartext).
-func (e *Echo) StartH2CServer(address string, h2s *http2.Server) (err error) {
+func (e *Echo) StartH2CServer(address string, h2s *http2.Server) error {
e.startupMutex.Lock()
// Setup
s := e.Server
@@ -808,11 +809,12 @@ func (e *Echo) StartH2CServer(address string, h2s *http2.Server) (err error) {
}
if e.Listener == nil {
- e.Listener, err = newListener(s.Addr, e.ListenerNetwork)
+ l, err := newListener(s.Addr, e.ListenerNetwork)
if err != nil {
e.startupMutex.Unlock()
return err
}
+ e.Listener = l
}
if !e.HidePort {
e.colorer.Printf("⇨ http server started on %s\n", e.colorer.Green(e.Listener.Addr()))
diff --git a/vendor/github.com/labstack/echo/v4/echo_fs_go1.16.go b/vendor/github.com/labstack/echo/v4/echo_fs_go1.16.go
index 435459de..eb17768a 100644
--- a/vendor/github.com/labstack/echo/v4/echo_fs_go1.16.go
+++ b/vendor/github.com/labstack/echo/v4/echo_fs_go1.16.go
@@ -10,6 +10,7 @@ import (
"net/url"
"os"
"path/filepath"
+ "runtime"
"strings"
)
@@ -94,10 +95,12 @@ func StaticFileHandler(file string, filesystem fs.FS) HandlerFunc {
}
}
-// defaultFS emulates os.Open behaviour with filesystem opened by `os.DirFs`. Difference between `os.Open` and `fs.Open`
-// is that FS does not allow to open path that start with `..` or `/` etc. For example previously you could have `../images`
-// in your application but `fs := os.DirFS("./")` would not allow you to use `fs.Open("../images")` and this would break
-// all old applications that rely on being able to traverse up from current executable run path.
+// defaultFS exists to preserve the pre v4.7.0 behaviour where files were opened by `os.Open`.
+// v4.7 introduced the `echo.Filesystem` field, which is the Go1.16+ `fs.FS` interface.
+// The difference between `os.Open` and `fs.Open` is that fs.FS does not allow opening paths that start with `.`, `..`, `/`,
+// etc. For example, previously you could have `../images` in your application, but `fs := os.DirFS("./")` would not
+// allow you to use `fs.Open("../images")`, and this would break all old applications that rely on being able to
+// traverse up from the current executable run path.
// NB: private because you really should use fs.FS implementation instances
type defaultFS struct {
prefix string
@@ -108,20 +111,26 @@ func newDefaultFS() *defaultFS {
dir, _ := os.Getwd()
return &defaultFS{
prefix: dir,
- fs: os.DirFS(dir),
+ fs: nil,
}
}
func (fs defaultFS) Open(name string) (fs.File, error) {
+ if fs.fs == nil {
+ return os.Open(name)
+ }
return fs.fs.Open(name)
}
func subFS(currentFs fs.FS, root string) (fs.FS, error) {
root = filepath.ToSlash(filepath.Clean(root)) // note: fs.FS operates only with slashes. `ToSlash` is necessary for Windows
if dFS, ok := currentFs.(*defaultFS); ok {
- // we need to make exception for `defaultFS` instances as it interprets root prefix differently from fs.FS to
- // allow cases when root is given as `../somepath` which is not valid for fs.FS
- root = filepath.Join(dFS.prefix, root)
+ // we need to make an exception for `defaultFS` instances, as it interprets the root prefix differently from fs.FS.
+ // fs.FS.Open does not accept relative paths ("./", "../") or absolute paths at all, but prior to echo.Filesystem we
+ // could use paths like `./myfile.log` or `/etc/hosts`; these work fine with `os.Open` but not with fs.FS.
+ if isRelativePath(root) {
+ root = filepath.Join(dFS.prefix, root)
+ }
return &defaultFS{
prefix: root,
fs: os.DirFS(root),
@@ -130,6 +139,21 @@ func subFS(currentFs fs.FS, root string) (fs.FS, error) {
return fs.Sub(currentFs, root)
}
+func isRelativePath(path string) bool {
+ if path == "" {
+ return true
+ }
+ if path[0] == '/' {
+ return false
+ }
+ if runtime.GOOS == "windows" && strings.IndexByte(path, ':') != -1 {
+ // https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#file_and_directory_names
+ // https://docs.microsoft.com/en-us/dotnet/standard/io/file-path-formats
+ return false
+ }
+ return true
+}
+
// MustSubFS creates sub FS from current filesystem or panic on failure.
// Panic happens when `fsRoot` contains invalid path according to `fs.ValidPath` rules.
//
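The net effect of the defaultFS/isRelativePath changes above is that pre-4.7 path styles work again; a sketch with illustrative paths:

```go
package main

import (
	"github.com/labstack/echo/v4"
)

func main() {
	e := echo.New()

	// Relative and parent-relative roots resolve against the working directory
	// via defaultFS instead of being rejected by fs.FS path rules.
	e.Static("/static", "./public")
	e.File("/robots.txt", "../shared/robots.txt")

	e.Logger.Fatal(e.Start(":8080"))
}
```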
diff --git a/vendor/github.com/labstack/echo/v4/middleware/extractor.go b/vendor/github.com/labstack/echo/v4/middleware/extractor.go
index a57ed4e1..afdfd819 100644
--- a/vendor/github.com/labstack/echo/v4/middleware/extractor.go
+++ b/vendor/github.com/labstack/echo/v4/middleware/extractor.go
@@ -168,8 +168,8 @@ func valuesFromCookie(name string) ValuesExtractor {
// valuesFromForm returns a function that extracts values from the form field.
func valuesFromForm(name string) ValuesExtractor {
return func(c echo.Context) ([]string, error) {
- if parseErr := c.Request().ParseForm(); parseErr != nil {
- return nil, fmt.Errorf("valuesFromForm parse form failed: %w", parseErr)
+ if c.Request().Form == nil {
+ _ = c.Request().ParseMultipartForm(32 << 20) // same what `c.Request().FormValue(name)` does
}
values := c.Request().Form[name]
if len(values) == 0 {
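The extractor change above matters for middleware configured to read tokens from form fields, such as CSRF; a sketch with an illustrative field name:

```go
package main

import (
	"github.com/labstack/echo/v4"
	"github.com/labstack/echo/v4/middleware"
)

func main() {
	e := echo.New()

	// With the ParseMultipartForm fallback, the token is found in
	// multipart/form-data bodies as well, not only URL-encoded forms.
	e.Use(middleware.CSRFWithConfig(middleware.CSRFConfig{
		TokenLookup: "form:csrf",
	}))

	e.Logger.Fatal(e.Start(":8080"))
}
```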
diff --git a/vendor/github.com/labstack/echo/v4/middleware/recover.go b/vendor/github.com/labstack/echo/v4/middleware/recover.go
index a621a9ef..7b612853 100644
--- a/vendor/github.com/labstack/echo/v4/middleware/recover.go
+++ b/vendor/github.com/labstack/echo/v4/middleware/recover.go
@@ -2,6 +2,7 @@ package middleware
import (
"fmt"
+ "net/http"
"runtime"
"github.com/labstack/echo/v4"
@@ -77,6 +78,9 @@ func RecoverWithConfig(config RecoverConfig) echo.MiddlewareFunc {
defer func() {
if r := recover(); r != nil {
+ if r == http.ErrAbortHandler {
+ panic(r)
+ }
err, ok := r.(error)
if !ok {
err = fmt.Errorf("%v", r)
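Re-panicking with `http.ErrAbortHandler` matters because net/http treats that sentinel specially: it aborts the handler without logging a stack trace. A minimal standalone sketch of the same pattern outside Echo; the handler and address are hypothetical.

```go
package main

import (
	"log"
	"net/http"
)

// withRecover is a minimal sketch of a recovery wrapper. It swallows ordinary
// panics but re-raises http.ErrAbortHandler, the sentinel net/http itself uses
// to abort a handler cleanly without logging a stack trace.
func withRecover(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			if rec := recover(); rec != nil {
				if rec == http.ErrAbortHandler {
					panic(rec) // let the http package handle the abort as usual
				}
				log.Printf("recovered from panic: %v", rec)
				w.WriteHeader(http.StatusInternalServerError)
			}
		}()
		next.ServeHTTP(w, r)
	})
}

func main() {
	h := withRecover(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		panic(http.ErrAbortHandler) // aborts the response without a stack trace in the log
	}))
	_ = http.ListenAndServe(":8080", h) // hypothetical address
}
```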
diff --git a/vendor/github.com/labstack/echo/v4/middleware/timeout.go b/vendor/github.com/labstack/echo/v4/middleware/timeout.go
index 768ef8d7..4e8836c8 100644
--- a/vendor/github.com/labstack/echo/v4/middleware/timeout.go
+++ b/vendor/github.com/labstack/echo/v4/middleware/timeout.go
@@ -2,10 +2,10 @@ package middleware
import (
"context"
+ "github.com/labstack/echo/v4"
"net/http"
+ "sync"
"time"
-
- "github.com/labstack/echo/v4"
)
// ---------------------------------------------------------------------------------------------------------------
@@ -55,29 +55,27 @@ import (
// })
//
-type (
- // TimeoutConfig defines the config for Timeout middleware.
- TimeoutConfig struct {
- // Skipper defines a function to skip middleware.
- Skipper Skipper
-
- // ErrorMessage is written to response on timeout in addition to http.StatusServiceUnavailable (503) status code
- // It can be used to define a custom timeout error message
- ErrorMessage string
-
- // OnTimeoutRouteErrorHandler is an error handler that is executed for error that was returned from wrapped route after
- // request timeouted and we already had sent the error code (503) and message response to the client.
- // NB: do not write headers/body inside this handler. The response has already been sent to the client and response writer
- // will not accept anything no more. If you want to know what actual route middleware timeouted use `c.Path()`
- OnTimeoutRouteErrorHandler func(err error, c echo.Context)
-
- // Timeout configures a timeout for the middleware, defaults to 0 for no timeout
- // NOTE: when difference between timeout duration and handler execution time is almost the same (in range of 100microseconds)
- // the result of timeout does not seem to be reliable - could respond timeout, could respond handler output
- // difference over 500microseconds (0.5millisecond) response seems to be reliable
- Timeout time.Duration
- }
-)
+// TimeoutConfig defines the config for Timeout middleware.
+type TimeoutConfig struct {
+ // Skipper defines a function to skip middleware.
+ Skipper Skipper
+
+	// ErrorMessage is written to the response on timeout, in addition to the http.StatusServiceUnavailable (503) status code.
+	// It can be used to define a custom timeout error message.
+ ErrorMessage string
+
+	// OnTimeoutRouteErrorHandler is an error handler that is executed for the error returned from the wrapped route after
+	// the request timed out and we have already sent the error code (503) and message response to the client.
+	// NB: do not write headers/body inside this handler. The response has already been sent to the client and the response
+	// writer will not accept any more writes. If you want to know which route timed out, use `c.Path()`
+ OnTimeoutRouteErrorHandler func(err error, c echo.Context)
+
+	// Timeout configures a timeout for the middleware, defaults to 0 for no timeout
+	// NOTE: when the difference between the timeout duration and the handler execution time is very small (within roughly
+	// 100 microseconds), the result is not reliable - the response could be either the timeout or the handler output.
+	// With a difference over 500 microseconds (0.5 milliseconds) the response seems to be reliable.
+ Timeout time.Duration
+}
var (
// DefaultTimeoutConfig is the default Timeout middleware config.
@@ -94,10 +92,17 @@ func Timeout() echo.MiddlewareFunc {
return TimeoutWithConfig(DefaultTimeoutConfig)
}
-// TimeoutWithConfig returns a Timeout middleware with config.
-// See: `Timeout()`.
+// TimeoutWithConfig returns a Timeout middleware with config or panics on invalid configuration.
func TimeoutWithConfig(config TimeoutConfig) echo.MiddlewareFunc {
- // Defaults
+ mw, err := config.ToMiddleware()
+ if err != nil {
+ panic(err)
+ }
+ return mw
+}
+
+// ToMiddleware converts TimeoutConfig to middleware or returns an error for invalid configuration
+func (config TimeoutConfig) ToMiddleware() (echo.MiddlewareFunc, error) {
if config.Skipper == nil {
config.Skipper = DefaultTimeoutConfig.Skipper
}
@@ -108,26 +113,29 @@ func TimeoutWithConfig(config TimeoutConfig) echo.MiddlewareFunc {
return next(c)
}
+ errChan := make(chan error, 1)
handlerWrapper := echoHandlerFuncWrapper{
+ writer: &ignorableWriter{ResponseWriter: c.Response().Writer},
ctx: c,
handler: next,
- errChan: make(chan error, 1),
+ errChan: errChan,
errHandler: config.OnTimeoutRouteErrorHandler,
}
handler := http.TimeoutHandler(handlerWrapper, config.Timeout, config.ErrorMessage)
- handler.ServeHTTP(c.Response().Writer, c.Request())
+ handler.ServeHTTP(handlerWrapper.writer, c.Request())
select {
- case err := <-handlerWrapper.errChan:
+ case err := <-errChan:
return err
default:
return nil
}
}
- }
+ }, nil
}
type echoHandlerFuncWrapper struct {
+ writer *ignorableWriter
ctx echo.Context
handler echo.HandlerFunc
errHandler func(err error, c echo.Context)
@@ -160,23 +168,53 @@ func (t echoHandlerFuncWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Reques
}
return // on timeout we cannot send the handler error to the client because `http.TimeoutHandler` has already sent headers
}
- // we restore original writer only for cases we did not timeout. On timeout we have already sent response to client
- // and should not anymore send additional headers/data
- // so on timeout writer stays what http.TimeoutHandler uses and prevents writing headers/body
if err != nil {
- // Error must be written into Writer created in `http.TimeoutHandler` so to get Response into `commited` state.
- // So call global error handler to write error to the client. This is needed or `http.TimeoutHandler` will send
- // status code by itself and after that our tries to write status code will not work anymore and/or create errors in
- // log about `superfluous response.WriteHeader call from`
- t.ctx.Error(err)
- // we pass error from handler to middlewares up in handler chain to act on it if needed. But this means that
- // global error handler is probably be called twice as `t.ctx.Error` already does that.
-
- // NB: later call of the global error handler or middlewares will not take any effect, as echo.Response will be
- // already marked as `committed` because we called global error handler above.
- t.ctx.Response().Writer = originalWriter // make sure we restore before we signal original coroutine about the error
+		// This is needed because `http.TimeoutHandler` writes the status code by itself on error, and after that our attempts
+		// to write a status code no longer work, as echo.Response considers itself already "committed" and further writes
+		// create `superfluous response.WriteHeader call from` errors in the log
+		t.writer.Ignore(true)
+		t.ctx.Response().Writer = originalWriter // make sure we restore the writer before we signal the original goroutine about the error
+		// we pass the error from the handler up the middleware chain so it can be acted on if needed.
t.errChan <- err
return
}
+	// we restore the original writer only in cases where we did not time out. On timeout we have already sent the response
+	// to the client and should not send any additional headers/data,
+	// so on timeout the writer stays the one http.TimeoutHandler uses, which prevents writing headers/body
t.ctx.Response().Writer = originalWriter
}
+
+// ignorableWriter is a ResponseWriter implementation that allows us to mark the writer to ignore further write calls. This
+// is handy in cases when you do not have direct control of the code being executed (3rd party middleware) but want to make
+// sure that the external code will not be able to write a response to the client.
+// The writer is safe for concurrent writes.
+type ignorableWriter struct {
+ http.ResponseWriter
+
+ lock sync.Mutex
+ ignoreWrites bool
+}
+
+func (w *ignorableWriter) Ignore(ignore bool) {
+ w.lock.Lock()
+ w.ignoreWrites = ignore
+ w.lock.Unlock()
+}
+
+func (w *ignorableWriter) WriteHeader(code int) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ if w.ignoreWrites {
+ return
+ }
+ w.ResponseWriter.WriteHeader(code)
+}
+
+func (w *ignorableWriter) Write(b []byte) (int, error) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ if w.ignoreWrites {
+ return len(b), nil
+ }
+ return w.ResponseWriter.Write(b)
+}
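A hedged usage sketch of the timeout middleware shape shown in this hunk, based only on the fields and methods visible above; the route, address and durations are hypothetical.

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/labstack/echo/v4"
	"github.com/labstack/echo/v4/middleware"
)

func main() {
	e := echo.New()

	// ToMiddleware returns an error instead of panicking, which is convenient
	// when the configuration is built at runtime.
	timeoutMW, err := middleware.TimeoutConfig{
		Timeout:      5 * time.Second,
		ErrorMessage: "request timed out",
		OnTimeoutRouteErrorHandler: func(err error, c echo.Context) {
			// Only inspect/log here; the 503 response has already been sent.
			log.Printf("timeout on %s: %v", c.Path(), err)
		},
	}.ToMiddleware()
	if err != nil {
		log.Fatal(err)
	}
	e.Use(timeoutMW)

	e.GET("/slow", func(c echo.Context) error { // hypothetical route
		time.Sleep(10 * time.Second)
		return c.String(http.StatusOK, "done")
	})

	e.Logger.Fatal(e.Start(":8080"))
}
```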
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/bulk_export.go b/vendor/github.com/mattermost/mattermost-server/v6/model/bulk_export.go
new file mode 100644
index 00000000..b18c32a3
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/bulk_export.go
@@ -0,0 +1,13 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package model
+
+// ExportDataDir is the name of the directory where additional data
+// included with the export (e.g. file attachments) is stored.
+const ExportDataDir = "data"
+
+type BulkExportOpts struct {
+ IncludeAttachments bool
+ CreateArchive bool
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/channel.go b/vendor/github.com/mattermost/mattermost-server/v6/model/channel.go
index dfa40347..2f353c7c 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/channel.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/channel.go
@@ -6,6 +6,8 @@ package model
import (
"crypto/sha1"
"encoding/hex"
+ "encoding/json"
+ "errors"
"io"
"net/http"
"sort"
@@ -51,11 +53,11 @@ type Channel struct {
ExtraUpdateAt int64 `json:"extra_update_at"`
CreatorId string `json:"creator_id"`
SchemeId *string `json:"scheme_id"`
- Props map[string]interface{} `json:"props" db:"-"`
+ Props map[string]interface{} `json:"props"`
GroupConstrained *bool `json:"group_constrained"`
Shared *bool `json:"shared"`
TotalMsgCountRoot int64 `json:"total_msg_count_root"`
- PolicyID *string `json:"policy_id" db:"-"`
+ PolicyID *string `json:"policy_id"`
LastRootPostAt int64 `json:"last_root_post_at"`
}
@@ -141,6 +143,8 @@ type ChannelSearchOpts struct {
Private bool
Page *int
PerPage *int
+ LastDeleteAt int
+ LastUpdateAt int
}
type ChannelMemberCountByGroup struct {
@@ -157,6 +161,23 @@ func WithID(ID string) ChannelOption {
}
}
+// The following are some GraphQL methods necessary to return the
+// data as float64. The GraphQL spec doesn't support 64-bit integers,
+// so we have to pass the data as float64. The trailing _ is a hack
+// to keep the attribute name the same in the GraphQL schema.
+
+func (o *Channel) CreateAt_() float64 {
+ return float64(o.CreateAt)
+}
+
+func (o *Channel) UpdateAt_() float64 {
+ return float64(o.UpdateAt)
+}
+
+func (o *Channel) DeleteAt_() float64 {
+ return float64(o.DeleteAt)
+}
+
func (o *Channel) DeepCopy() *Channel {
copy := *o
if copy.SchemeId != nil {
@@ -303,6 +324,24 @@ func (o *Channel) GetOtherUserIdForDM(userId string) string {
return otherUserId
}
+func (ChannelType) ImplementsGraphQLType(name string) bool {
+ return name == "ChannelType"
+}
+
+func (t ChannelType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(string(t))
+}
+
+func (t *ChannelType) UnmarshalGraphQL(input interface{}) error {
+ chType, ok := input.(string)
+ if !ok {
+ return errors.New("wrong type")
+ }
+
+ *t = ChannelType(chType)
+ return nil
+}
+
func GetDMNameFromIds(userId1, userId2 string) string {
if userId1 > userId2 {
return userId2 + "__" + userId1
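A minimal sketch of why these int64 millisecond timestamps are exposed through float64 accessors: graph-gophers/graphql-go maps the GraphQL `Float` scalar to Go's float64, and the spec has no 64-bit integer scalar. The schema and resolver below are illustrative only, not Mattermost's actual schema.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"

	graphql "github.com/graph-gophers/graphql-go"
)

// channel mimics the pattern above: the underlying value is int64 (a Unix
// millisecond timestamp), but the resolver exposes it as float64 so it can be
// served as the GraphQL Float scalar.
type channel struct{ createAt int64 }

func (c *channel) CreateAt() float64 { return float64(c.createAt) }

type query struct{}

func (query) Channel() *channel { return &channel{createAt: 1648771200000} }

const schema = `
	schema { query: Query }
	type Query { channel: Channel! }
	type Channel { createAt: Float! }
`

func main() {
	s := graphql.MustParseSchema(schema, &query{})
	resp := s.Exec(context.Background(), `{ channel { createAt } }`, "", nil)
	out, _ := json.Marshal(resp)
	fmt.Println(string(out)) // {"data":{"channel":{"createAt":...}}}; number formatting may vary
}
```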
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/channel_member.go b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_member.go
index 324c4f89..cf26d3ea 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/channel_member.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_member.go
@@ -60,6 +60,31 @@ type ChannelMember struct {
ExplicitRoles string `json:"explicit_roles"`
}
+// The following are some GraphQL methods necessary to return the
+// data as float64. The GraphQL spec doesn't support 64-bit integers,
+// so we have to pass the data as float64. The trailing _ is a hack
+// to keep the attribute name the same in the GraphQL schema.
+
+func (o *ChannelMember) LastViewedAt_() float64 {
+ return float64(o.LastViewedAt)
+}
+
+func (o *ChannelMember) MsgCount_() float64 {
+ return float64(o.MsgCount)
+}
+
+func (o *ChannelMember) MentionCount_() float64 {
+ return float64(o.MentionCount)
+}
+
+func (o *ChannelMember) MentionCountRoot_() float64 {
+ return float64(o.MentionCountRoot)
+}
+
+func (o *ChannelMember) LastUpdateAt_() float64 {
+ return float64(o.LastUpdateAt)
+}
+
// ChannelMemberWithTeamData contains ChannelMember appended with extra team information
// as well.
type ChannelMemberWithTeamData struct {
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/channel_sidebar.go b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_sidebar.go
index fd4d44ca..e434e174 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/channel_sidebar.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/channel_sidebar.go
@@ -4,6 +4,8 @@
package model
import (
+ "encoding/json"
+ "errors"
"regexp"
)
@@ -54,6 +56,10 @@ type SidebarCategoryWithChannels struct {
Channels []string `json:"channel_ids"`
}
+func (sc SidebarCategoryWithChannels) ChannelIds() []string {
+ return sc.Channels
+}
+
type SidebarCategoryOrder []string
// OrderedSidebarCategories combines categories, their channel IDs and an array of Category IDs, sorted
@@ -83,3 +89,39 @@ func IsValidCategoryId(s string) bool {
// Or default categories can follow the pattern {type}_{userID}_{teamID}
return categoryIdPattern.MatchString(s)
}
+
+func (SidebarCategoryType) ImplementsGraphQLType(name string) bool {
+ return name == "SidebarCategoryType"
+}
+
+func (t SidebarCategoryType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(string(t))
+}
+
+func (t *SidebarCategoryType) UnmarshalGraphQL(input interface{}) error {
+ chType, ok := input.(string)
+ if !ok {
+ return errors.New("wrong type")
+ }
+
+ *t = SidebarCategoryType(chType)
+ return nil
+}
+
+func (SidebarCategorySorting) ImplementsGraphQLType(name string) bool {
+ return name == "SidebarCategorySorting"
+}
+
+func (t SidebarCategorySorting) MarshalJSON() ([]byte, error) {
+ return json.Marshal(string(t))
+}
+
+func (t *SidebarCategorySorting) UnmarshalGraphQL(input interface{}) error {
+ chType, ok := input.(string)
+ if !ok {
+ return errors.New("wrong type")
+ }
+
+ *t = SidebarCategorySorting(chType)
+ return nil
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/client4.go b/vendor/github.com/mattermost/mattermost-server/v6/model/client4.go
index d4213d15..beabda82 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/client4.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/client4.go
@@ -46,6 +46,7 @@ const (
APIURLSuffixV1 = "/api/v1"
APIURLSuffixV4 = "/api/v4"
+ APIURLSuffixV5 = "/api/v5"
APIURLSuffix = APIURLSuffixV4
)
@@ -5252,7 +5253,7 @@ func (c *Client4) GetGroupsAssociatedToChannelsByTeam(teamId string, opts GroupS
// GetGroups retrieves Mattermost Groups
func (c *Client4) GetGroups(opts GroupSearchOpts) ([]*Group, *Response, error) {
path := fmt.Sprintf(
- "%s?include_member_count=%v&not_associated_to_team=%v&not_associated_to_channel=%v&filter_allow_reference=%v&q=%v&filter_parent_team_permitted=%v",
+ "%s?include_member_count=%v&not_associated_to_team=%v&not_associated_to_channel=%v&filter_allow_reference=%v&q=%v&filter_parent_team_permitted=%v&group_source=%v",
c.groupsRoute(),
opts.IncludeMemberCount,
opts.NotAssociatedToTeam,
@@ -5260,6 +5261,7 @@ func (c *Client4) GetGroups(opts GroupSearchOpts) ([]*Group, *Response, error) {
opts.FilterAllowReference,
opts.Q,
opts.FilterParentTeamPermitted,
+ opts.Source,
)
if opts.Since > 0 {
path = fmt.Sprintf("%s&since=%v", path, opts.Since)
@@ -7071,6 +7073,36 @@ func (c *Client4) GetGroup(groupID, etag string) (*Group, *Response, error) {
return &g, BuildResponse(r), nil
}
+func (c *Client4) CreateGroup(group *Group) (*Group, *Response, error) {
+ groupJSON, jsonErr := json.Marshal(group)
+ if jsonErr != nil {
+ return nil, nil, NewAppError("CreateGroup", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+ }
+ r, err := c.DoAPIPostBytes("/groups", groupJSON)
+ if err != nil {
+ return nil, BuildResponse(r), err
+ }
+ defer closeBody(r)
+ var p Group
+ if jsonErr := json.NewDecoder(r.Body).Decode(&p); jsonErr != nil {
+ return nil, nil, NewAppError("CreateGroup", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+ }
+ return &p, BuildResponse(r), nil
+}
+
+func (c *Client4) DeleteGroup(groupID string) (*Group, *Response, error) {
+ r, err := c.DoAPIDelete(c.groupRoute(groupID))
+ if err != nil {
+ return nil, BuildResponse(r), err
+ }
+ defer closeBody(r)
+ var p Group
+ if jsonErr := json.NewDecoder(r.Body).Decode(&p); jsonErr != nil {
+ return nil, nil, NewAppError("DeleteGroup", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+ }
+ return &p, BuildResponse(r), nil
+}
+
func (c *Client4) PatchGroup(groupID string, patch *GroupPatch) (*Group, *Response, error) {
payload, _ := json.Marshal(patch)
r, err := c.DoAPIPut(c.groupRoute(groupID)+"/patch", string(payload))
@@ -7085,6 +7117,40 @@ func (c *Client4) PatchGroup(groupID string, patch *GroupPatch) (*Group, *Respon
return &g, BuildResponse(r), nil
}
+func (c *Client4) UpsertGroupMembers(groupID string, userIds *GroupModifyMembers) ([]*GroupMember, *Response, error) {
+ payload, jsonErr := json.Marshal(userIds)
+ if jsonErr != nil {
+ return nil, nil, NewAppError("UpsertGroupMembers", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+ }
+ r, err := c.DoAPIPostBytes(c.groupRoute(groupID)+"/members", payload)
+ if err != nil {
+ return nil, BuildResponse(r), err
+ }
+ defer closeBody(r)
+ var g []*GroupMember
+ if jsonErr := json.NewDecoder(r.Body).Decode(&g); jsonErr != nil {
+ return nil, nil, NewAppError("UpsertGroupMembers", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+ }
+ return g, BuildResponse(r), nil
+}
+
+func (c *Client4) DeleteGroupMembers(groupID string, userIds *GroupModifyMembers) ([]*GroupMember, *Response, error) {
+ payload, jsonErr := json.Marshal(userIds)
+ if jsonErr != nil {
+ return nil, nil, NewAppError("DeleteGroupMembers", "api.marshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+ }
+ r, err := c.DoAPIDeleteBytes(c.groupRoute(groupID)+"/members", payload)
+ if err != nil {
+ return nil, BuildResponse(r), err
+ }
+ defer closeBody(r)
+ var g []*GroupMember
+ if jsonErr := json.NewDecoder(r.Body).Decode(&g); jsonErr != nil {
+ return nil, nil, NewAppError("DeleteGroupMembers", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+ }
+ return g, BuildResponse(r), nil
+}
+
func (c *Client4) LinkGroupSyncable(groupID, syncableID string, syncableType GroupSyncableType, patch *GroupSyncablePatch) (*GroupSyncable, *Response, error) {
payload, _ := json.Marshal(patch)
url := fmt.Sprintf("%s/link", c.groupSyncableRoute(groupID, syncableID, syncableType))
@@ -7435,6 +7501,20 @@ func (c *Client4) MarkNoticesViewed(ids []string) (*Response, error) {
return BuildResponse(r), nil
}
+func (c *Client4) CompleteOnboarding(request *CompleteOnboardingRequest) (*Response, error) {
+ buf, err := json.Marshal(request)
+ if err != nil {
+ return nil, NewAppError("CompleteOnboarding", "api.marshal_error", nil, err.Error(), http.StatusInternalServerError)
+ }
+ r, err := c.DoAPIPost(c.systemRoute()+"/onboarding/complete", string(buf))
+ if err != nil {
+ return BuildResponse(r), err
+ }
+ defer closeBody(r)
+
+ return BuildResponse(r), nil
+}
+
// CreateUpload creates a new upload session.
func (c *Client4) CreateUpload(us *UploadSession) (*UploadSession, *Response, error) {
buf, err := json.Marshal(us)
@@ -7834,3 +7914,20 @@ func (c *Client4) GetAncillaryPermissions(subsectionPermissions []string) ([]str
json.NewDecoder(r.Body).Decode(&returnedPermissions)
return returnedPermissions, BuildResponse(r), nil
}
+
+func (c *Client4) GetUsersWithInvalidEmails(page, perPage int) ([]*User, *Response, error) {
+ query := fmt.Sprintf("/invalid_emails?page=%v&per_page=%v", page, perPage)
+ r, err := c.DoAPIGet(c.usersRoute()+query, "")
+ if err != nil {
+ return nil, BuildResponse(r), err
+ }
+ defer closeBody(r)
+ var list []*User
+ if r.StatusCode == http.StatusNotModified {
+ return list, BuildResponse(r), nil
+ }
+ if jsonErr := json.NewDecoder(r.Body).Decode(&list); jsonErr != nil {
+ return nil, nil, NewAppError("GetUsers", "api.unmarshal_error", nil, jsonErr.Error(), http.StatusInternalServerError)
+ }
+ return list, BuildResponse(r), nil
+}
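A hedged usage sketch of the new group endpoints on `Client4`, assuming a locally running server; the URL, token and user id are placeholders for illustration only.

```go
package main

import (
	"fmt"
	"log"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	// Server URL and token are placeholders.
	client := model.NewAPIv4Client("http://localhost:8065")
	client.AuthType = model.HeaderBearer
	client.AuthToken = "your-personal-access-token"

	// Create a custom (non-LDAP) group; RemoteId is left nil on purpose.
	group, _, err := client.CreateGroup(&model.Group{
		Name:           model.NewString("engineering"),
		DisplayName:    "Engineering",
		Source:         model.GroupSourceCustom,
		AllowReference: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Add a member to the freshly created group; the user id is hypothetical.
	members, _, err := client.UpsertGroupMembers(group.Id, &model.GroupModifyMembers{
		UserIds: []string{"some-user-id"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("group %s now has %d new member(s)\n", group.DisplayName, len(members))
}
```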
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/config.go b/vendor/github.com/mattermost/mattermost-server/v6/model/config.go
index d70b06b5..04dfa4f5 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/config.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/config.go
@@ -114,7 +114,7 @@ const (
TeamSettingsDefaultCustomDescriptionText = ""
TeamSettingsDefaultUserStatusAwayTimeout = 300
- SqlSettingsDefaultDataSource = "postgres://mmuser:mostest@localhost/mattermost_test?sslmode=disable&connect_timeout=10"
+ SqlSettingsDefaultDataSource = "postgres://mmuser:mostest@localhost/mattermost_test?sslmode=disable&connect_timeout=10&binary_parameters=yes"
FileSettingsDefaultDirectory = "./data/"
@@ -305,6 +305,7 @@ type ServiceSettings struct {
EnableTesting *bool `access:"environment_developer,write_restrictable,cloud_restrictable"`
EnableDeveloper *bool `access:"environment_developer,write_restrictable,cloud_restrictable"`
DeveloperFlags *string `access:"environment_developer"`
+ EnableClientPerformanceDebugging *bool `access:"environment_developer,write_restrictable,cloud_restrictable"`
EnableOpenTracing *bool `access:"write_restrictable,cloud_restrictable"`
EnableSecurityFixAlert *bool `access:"environment_smtp,write_restrictable,cloud_restrictable"`
EnableInsecureOutgoingConnections *bool `access:"environment_web_server,write_restrictable,cloud_restrictable"`
@@ -366,6 +367,7 @@ type ServiceSettings struct {
ThreadAutoFollow *bool `access:"experimental_features"`
CollapsedThreads *string `access:"experimental_features"`
ManagedResourcePaths *string `access:"environment_web_server,write_restrictable,cloud_restrictable"`
+ EnableCustomGroups *bool `access:"site_users_and_teams"`
}
func (s *ServiceSettings) SetDefaults(isUpdate bool) {
@@ -422,6 +424,10 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) {
s.DeveloperFlags = NewString("")
}
+ if s.EnableClientPerformanceDebugging == nil {
+ s.EnableClientPerformanceDebugging = NewBool(false)
+ }
+
if s.EnableOpenTracing == nil {
s.EnableOpenTracing = NewBool(false)
}
@@ -782,6 +788,10 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) {
if s.ManagedResourcePaths == nil {
s.ManagedResourcePaths = NewString("")
}
+
+ if s.EnableCustomGroups == nil {
+ s.EnableCustomGroups = NewBool(true)
+ }
}
type ClusterSettings struct {
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/custom_status.go b/vendor/github.com/mattermost/mattermost-server/v6/model/custom_status.go
index 2f5084b8..74cd9240 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/custom_status.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/custom_status.go
@@ -8,6 +8,8 @@ import (
"encoding/json"
"fmt"
"time"
+
+ "github.com/graph-gophers/graphql-go"
)
const (
@@ -61,6 +63,12 @@ func (cs *CustomStatus) AreDurationAndExpirationTimeValid() bool {
return false
}
+// ExpiresAt_ returns the time in a type that has the marshal/unmarshal methods
+// attached to it.
+func (cs *CustomStatus) ExpiresAt_() graphql.Time {
+ return graphql.Time{Time: cs.ExpiresAt}
+}
+
func RuneToHexadecimalString(r rune) string {
return fmt.Sprintf("%04x", r)
}
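A minimal sketch (with a hypothetical timestamp) of what returning `graphql.Time` buys: the wrapper satisfies the library's `Time` scalar and marshals through the embedded `time.Time`.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	graphql "github.com/graph-gophers/graphql-go"
)

func main() {
	// graphql.Time wraps time.Time and carries the GraphQL marshal/unmarshal
	// behaviour, so resolvers can return it directly for `Time` scalar fields.
	expires := graphql.Time{Time: time.Date(2022, time.April, 1, 0, 0, 0, 0, time.UTC)}

	fmt.Println(expires.ImplementsGraphQLType("Time")) // true

	// The embedded time.Time marshals to RFC 3339, which is what ends up in responses.
	out, _ := json.Marshal(expires)
	fmt.Println(string(out)) // "2022-04-01T00:00:00Z"
}
```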
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/emoji.go b/vendor/github.com/mattermost/mattermost-server/v6/model/emoji.go
index fd0e8ab3..4b30ee43 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/emoji.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/emoji.go
@@ -78,9 +78,12 @@ func (emoji *Emoji) IsValid() *AppError {
}
func IsValidEmojiName(name string) *AppError {
- if name == "" || len(name) > EmojiNameMaxLength || !IsValidAlphaNumHyphenUnderscorePlus(name) || inSystemEmoji(name) {
+ if name == "" || len(name) > EmojiNameMaxLength || !IsValidAlphaNumHyphenUnderscorePlus(name) {
return NewAppError("Emoji.IsValid", "model.emoji.name.app_error", nil, "", http.StatusBadRequest)
}
+ if inSystemEmoji(name) {
+ return NewAppError("Emoji.IsValid", "model.emoji.system_emoji_name.app_error", nil, "", http.StatusBadRequest)
+ }
return nil
}
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/feature_flags.go b/vendor/github.com/mattermost/mattermost-server/v6/model/feature_flags.go
index 6998a03a..a2fe45bd 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/feature_flags.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/feature_flags.go
@@ -41,18 +41,12 @@ type FeatureFlags struct {
// Enable the Global Header
GlobalHeader bool
- // Enable different team menu button treatments, possible values = ("none", "by_team_name", "inverted_sidebar_bg_color")
- AddChannelButton string
-
// Determine whether when a user gets created, they'll have noisy notifications e.g. Send desktop notifications for all activity
NewAccountNoisy bool
// Enable Calls plugin support in the mobile app
CallsMobile bool
- // Start A/B tour tips automatically, possible values = ("none", "auto")
- AutoTour string
-
// A dash separated list for feature flags to turn on for Boards
BoardsFeatureFlags string
@@ -68,6 +62,8 @@ type FeatureFlags struct {
// A/B test for whether radio buttons or toggle button is more effective in in-screen invite to team modal ("none", "toggle")
InviteToTeam string
+ CustomGroups bool
+
// Enable inline post editing
InlinePostEditing bool
@@ -75,6 +71,17 @@ type FeatureFlags struct {
BoardsDataRetention bool
NormalizeLdapDNs bool
+
+ EnableInactivityCheckJob bool
+
+ // Enable special onboarding flow for first admin
+ UseCaseOnboarding bool
+
+ // Enable Workspace optimization dashboard
+ WorkspaceOptimizationDashboard bool
+
+ // Enable GraphQL feature
+ GraphQL bool
}
func (f *FeatureFlags) SetDefaults() {
@@ -89,20 +96,22 @@ func (f *FeatureFlags) SetDefaults() {
f.PluginFocalboard = ""
f.PermalinkPreviews = true
f.GlobalHeader = true
- f.AddChannelButton = "by_team_name"
f.NewAccountNoisy = false
f.CallsMobile = false
- f.AutoTour = "none"
f.BoardsFeatureFlags = ""
f.AddMembersToChannel = "top"
f.GuidedChannelCreation = false
f.ResendInviteEmailInterval = ""
f.InviteToTeam = "none"
+ f.CustomGroups = true
f.InlinePostEditing = false
f.BoardsDataRetention = false
f.NormalizeLdapDNs = false
+ f.EnableInactivityCheckJob = true
+ f.UseCaseOnboarding = true
+ f.WorkspaceOptimizationDashboard = true
+ f.GraphQL = false
}
-
func (f *FeatureFlags) Plugins() map[string]string {
rFFVal := reflect.ValueOf(f).Elem()
rFFType := reflect.TypeOf(f).Elem()
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/github_release.go b/vendor/github.com/mattermost/mattermost-server/v6/model/github_release.go
new file mode 100644
index 00000000..75cc0a5f
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/github_release.go
@@ -0,0 +1,26 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package model
+
+import (
+ "net/http"
+)
+
+type GithubReleaseInfo struct {
+ Id int `json:"id"`
+ TagName string `json:"tag_name"`
+ Name string `json:"name"`
+ CreatedAt string `json:"created_at"`
+ PublishedAt string `json:"published_at"`
+ Body string `json:"body"`
+ Url string `json:"html_url"`
+}
+
+func (g *GithubReleaseInfo) IsValid() *AppError {
+ if g.Id == 0 {
+ return NewAppError("GithubReleaseInfo.IsValid", "model.github_release_info.is_valid.id.app_error", nil, "", http.StatusInternalServerError)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/group.go b/vendor/github.com/mattermost/mattermost-server/v6/model/group.go
index 566b2361..428c431a 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/group.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/group.go
@@ -9,7 +9,8 @@ import (
)
const (
- GroupSourceLdap GroupSource = "ldap"
+ GroupSourceLdap GroupSource = "ldap"
+ GroupSourceCustom GroupSource = "custom"
GroupNameMaxLength = 64
GroupSourceMaxLength = 64
@@ -22,6 +23,7 @@ type GroupSource string
var allGroupSources = []GroupSource{
GroupSourceLdap,
+ GroupSourceCustom,
}
var groupSourcesRequiringRemoteID = []GroupSource{
@@ -34,7 +36,7 @@ type Group struct {
DisplayName string `json:"display_name"`
Description string `json:"description"`
Source GroupSource `json:"source"`
- RemoteId string `json:"remote_id"`
+ RemoteId *string `json:"remote_id"`
CreateAt int64 `json:"create_at"`
UpdateAt int64 `json:"update_at"`
DeleteAt int64 `json:"delete_at"`
@@ -43,6 +45,11 @@ type Group struct {
AllowReference bool `json:"allow_reference"`
}
+type GroupWithUserIds struct {
+ Group
+ UserIds []string `json:"user_ids"`
+}
+
type GroupWithSchemeAdmin struct {
Group
SchemeAdmin *bool `db:"SyncableSchemeAdmin" json:"scheme_admin,omitempty"`
@@ -63,6 +70,8 @@ type GroupPatch struct {
DisplayName *string `json:"display_name"`
Description *string `json:"description"`
AllowReference *bool `json:"allow_reference"`
+	// For security reasons (including preventing unintended LDAP group synchronization) do not allow a Group's RemoteId or
+	// Source field to be included in patches.
}
type LdapGroupSearchOpts struct {
@@ -79,12 +88,21 @@ type GroupSearchOpts struct {
FilterAllowReference bool
PageOpts *PageOpts
Since int64
+ Source GroupSource
// FilterParentTeamPermitted filters the groups to the intersect of the
// set associated to the parent team and those returned by the query.
// If the parent team is not group-constrained or if NotAssociatedToChannel
// is not set then this option is ignored.
FilterParentTeamPermitted bool
+
+	// FilterHasMember filters the groups to the intersection of the
+	// set returned by the query and those that have the given user as a member.
+ FilterHasMember string
+}
+
+type GetGroupOpts struct {
+ IncludeMemberCount bool
}
type PageOpts struct {
@@ -97,6 +115,10 @@ type GroupStats struct {
TotalMemberCount int64 `json:"total_member_count"`
}
+type GroupModifyMembers struct {
+ UserIds []string `json:"user_ids"`
+}
+
func (group *Group) Patch(patch *GroupPatch) {
if patch.Name != nil {
group.Name = patch.Name
@@ -137,7 +159,7 @@ func (group *Group) IsValidForCreate() *AppError {
return NewAppError("Group.IsValidForCreate", "model.group.source.app_error", nil, "", http.StatusBadRequest)
}
- if len(group.RemoteId) > GroupRemoteIDMaxLength || (group.RemoteId == "" && group.requiresRemoteId()) {
+ if (group.GetRemoteId() == "" && group.requiresRemoteId()) || len(group.GetRemoteId()) > GroupRemoteIDMaxLength {
return NewAppError("Group.IsValidForCreate", "model.group.remote_id.app_error", nil, "", http.StatusBadRequest)
}
@@ -188,3 +210,22 @@ func (group *Group) IsValidName() *AppError {
}
return nil
}
+
+func (group *Group) GetName() string {
+ if group.Name == nil {
+ return ""
+ }
+ return *group.Name
+}
+
+func (group *Group) GetRemoteId() string {
+ if group.RemoteId == nil {
+ return ""
+ }
+ return *group.RemoteId
+}
+
+type GroupsWithCount struct {
+ Groups []*Group `json:"groups"`
+ TotalCount int64 `json:"total_count"`
+}
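A small sketch of the nil-safe accessors and the deliberately narrow `GroupPatch` in use; the group values are illustrative.

```go
package main

import (
	"fmt"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	group := &model.Group{
		Name:        model.NewString("engineering"),
		DisplayName: "Engineering",
		Source:      model.GroupSourceCustom,
		// RemoteId stays nil for custom groups; GetRemoteId below handles that.
	}

	// RemoteId and Source are deliberately not part of GroupPatch, so a patch
	// can only touch the name, display name, description and reference flag.
	group.Patch(&model.GroupPatch{
		DisplayName:    model.NewString("Engineering Team"),
		AllowReference: model.NewBool(true),
	})

	fmt.Println(group.DisplayName)         // Engineering Team
	fmt.Println(group.GetRemoteId() == "") // true: nil-safe accessor instead of dereferencing *group.RemoteId
}
```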
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/job.go b/vendor/github.com/mattermost/mattermost-server/v6/model/job.go
index e892b051..8b6272e8 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/job.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/job.go
@@ -78,29 +78,6 @@ func (j *Job) IsValid() *AppError {
return NewAppError("Job.IsValid", "model.job.is_valid.create_at.app_error", nil, "id="+j.Id, http.StatusBadRequest)
}
- switch j.Type {
- case JobTypeDataRetention:
- case JobTypeElasticsearchPostIndexing:
- case JobTypeElasticsearchPostAggregation:
- case JobTypeBlevePostIndexing:
- case JobTypeLdapSync:
- case JobTypeMessageExport:
- case JobTypeMigrations:
- case JobTypePlugins:
- case JobTypeProductNotices:
- case JobTypeExpiryNotify:
- case JobTypeActiveUsers:
- case JobTypeImportProcess:
- case JobTypeImportDelete:
- case JobTypeExportProcess:
- case JobTypeExportDelete:
- case JobTypeCloud:
- case JobTypeResendInvitationEmail:
- case JobTypeExtractContent:
- default:
- return NewAppError("Job.IsValid", "model.job.is_valid.type.app_error", nil, "id="+j.Id, http.StatusBadRequest)
- }
-
switch j.Status {
case JobStatusPending:
case JobStatusInProgress:
@@ -119,11 +96,10 @@ type Worker interface {
Run()
Stop()
JobChannel() chan<- Job
+ IsEnabled(cfg *Config) bool
}
type Scheduler interface {
- Name() string
- JobType() string
Enabled(cfg *Config) bool
NextScheduleTime(cfg *Config, now time.Time, pendingJobs bool, lastSuccessfulJob *Job) *time.Time
ScheduleJob(cfg *Config, pendingJobs bool, lastSuccessfulJob *Job) (*Job, *AppError)
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/license.go b/vendor/github.com/mattermost/mattermost-server/v6/model/license.go
index dbdc296e..c8a95305 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/license.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/license.go
@@ -323,6 +323,12 @@ func NewTestLicense(features ...string) *License {
return ret
}
+func NewTestLicenseSKU(skuShortName string, features ...string) *License {
+ lic := NewTestLicense(features...)
+ lic.SkuShortName = skuShortName
+ return lic
+}
+
func (lr *LicenseRecord) IsValid() *AppError {
if !IsValidId(lr.Id) {
return NewAppError("LicenseRecord.IsValid", "model.license_record.is_valid.id.app_error", nil, "", http.StatusBadRequest)
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/migration.go b/vendor/github.com/mattermost/mattermost-server/v6/model/migration.go
index 629189bb..16172e50 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/migration.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/migration.go
@@ -36,4 +36,5 @@ const (
MigrationKeyAddAboutSubsectionPermissions = "about_subsection_permissions"
MigrationKeyAddIntegrationsSubsectionPermissions = "integrations_subsection_permissions"
MigrationKeyAddPlaybooksPermissions = "playbooks_permissions"
+ MigrationKeyAddCustomUserGroupsPermissions = "custom_groups_permissions"
)
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/onboarding.go b/vendor/github.com/mattermost/mattermost-server/v6/model/onboarding.go
new file mode 100644
index 00000000..9c83e376
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/onboarding.go
@@ -0,0 +1,25 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package model
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// CompleteOnboardingRequest describes the parameters of a request to complete the onboarding flow.
+type CompleteOnboardingRequest struct {
+ InstallPlugins []string `json:"install_plugins"` // InstallPlugins is a list of plugins to be installed
+}
+
+// CompleteOnboardingRequestFromReader decodes a JSON-encoded request from the given io.Reader.
+func CompleteOnboardingRequestFromReader(reader io.Reader) (*CompleteOnboardingRequest, error) {
+ var r *CompleteOnboardingRequest
+ err := json.NewDecoder(reader).Decode(&r)
+ if err != nil {
+ return nil, err
+ }
+
+ return r, nil
+}
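A minimal usage sketch of `CompleteOnboardingRequestFromReader` with a hypothetical JSON body.

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/mattermost/mattermost-server/v6/model"
)

func main() {
	// A hypothetical request body as it might arrive at the onboarding endpoint.
	body := strings.NewReader(`{"install_plugins": ["playbooks", "focalboard"]}`)

	req, err := model.CompleteOnboardingRequestFromReader(body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.InstallPlugins) // [playbooks focalboard]
}
```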
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/permission.go b/vendor/github.com/mattermost/mattermost-server/v6/model/permission.go
index 9a3e4aae..e8d9d2c3 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/permission.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/permission.go
@@ -7,6 +7,7 @@ const (
PermissionScopeSystem = "system_scope"
PermissionScopeTeam = "team_scope"
PermissionScopeChannel = "channel_scope"
+ PermissionScopeGroup = "group_scope"
PermissionScopePlaybook = "playbook_scope"
PermissionScopeRun = "run_scope"
)
@@ -355,6 +356,11 @@ var PermissionRunView *Permission
// admin functions but not others
var PermissionManageSystem *Permission
+var PermissionCreateCustomGroup *Permission
+var PermissionManageCustomGroupMembers *Permission
+var PermissionEditCustomGroup *Permission
+var PermissionDeleteCustomGroup *Permission
+
var AllPermissions []*Permission
var DeprecatedPermissions []*Permission
@@ -1914,6 +1920,34 @@ func initializePermissions() {
PermissionScopeSystem,
}
+ PermissionCreateCustomGroup = &Permission{
+ "create_custom_group",
+ "authentication.permissions.create_custom_group.name",
+ "authentication.permissions.create_custom_group.description",
+ PermissionScopeSystem,
+ }
+
+ PermissionManageCustomGroupMembers = &Permission{
+ "manage_custom_group_members",
+ "authentication.permissions.manage_custom_group_members.name",
+ "authentication.permissions.manage_custom_group_members.description",
+ PermissionScopeGroup,
+ }
+
+ PermissionEditCustomGroup = &Permission{
+ "edit_custom_group",
+ "authentication.permissions.edit_custom_group.name",
+ "authentication.permissions.edit_custom_group.description",
+ PermissionScopeGroup,
+ }
+
+ PermissionDeleteCustomGroup = &Permission{
+ "delete_custom_group",
+ "authentication.permissions.delete_custom_group.name",
+ "authentication.permissions.delete_custom_group.description",
+ PermissionScopeGroup,
+ }
+
// Playbooks
PermissionPublicPlaybookCreate = &Permission{
"playbook_public_create",
@@ -2200,6 +2234,7 @@ func initializePermissions() {
PermissionGetLogs,
PermissionReadLicenseInformation,
PermissionManageLicenseInformation,
+ PermissionCreateCustomGroup,
}
TeamScopedPermissions := []*Permission{
@@ -2259,6 +2294,12 @@ func initializePermissions() {
PermissionUseGroupMentions,
}
+ GroupScopedPermissions := []*Permission{
+ PermissionManageCustomGroupMembers,
+ PermissionEditCustomGroup,
+ PermissionDeleteCustomGroup,
+ }
+
DeprecatedPermissions = []*Permission{
PermissionPermanentDeleteUser,
PermissionManageWebhooks,
@@ -2307,6 +2348,7 @@ func initializePermissions() {
AllPermissions = append(AllPermissions, ChannelScopedPermissions...)
AllPermissions = append(AllPermissions, SysconsoleReadPermissions...)
AllPermissions = append(AllPermissions, SysconsoleWritePermissions...)
+ AllPermissions = append(AllPermissions, GroupScopedPermissions...)
AllPermissions = append(AllPermissions, PlaybookScopedPermissions...)
AllPermissions = append(AllPermissions, RunScopedPermissions...)
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/plugin_on_install_event.go b/vendor/github.com/mattermost/mattermost-server/v6/model/plugin_on_install_event.go
new file mode 100644
index 00000000..186fd5bd
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/plugin_on_install_event.go
@@ -0,0 +1,9 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package model
+
+// OnInstallEvent is sent to the plugin when it gets installed.
+type OnInstallEvent struct {
+ UserId string // The user who installed the plugin
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/post.go b/vendor/github.com/mattermost/mattermost-server/v6/model/post.go
index 87c1f338..8736de3a 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/post.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/post.go
@@ -87,7 +87,7 @@ type Post struct {
// MessageSource will contain the message as submitted by the user if Message has been modified
// by Mattermost for presentation (e.g if an image proxy is being used). It should be used to
// populate edit boxes if present.
- MessageSource string `json:"message_source,omitempty" db:"-"`
+ MessageSource string `json:"message_source,omitempty"`
Type string `json:"type"`
propsMu sync.RWMutex `db:"-"` // Unexported mutex used to guard Post.Props.
@@ -95,16 +95,16 @@ type Post struct {
Hashtags string `json:"hashtags"`
Filenames StringArray `json:"-"` // Deprecated, do not use this field any more
FileIds StringArray `json:"file_ids,omitempty"`
- PendingPostId string `json:"pending_post_id" db:"-"`
+ PendingPostId string `json:"pending_post_id"`
HasReactions bool `json:"has_reactions,omitempty"`
RemoteId *string `json:"remote_id,omitempty"`
// Transient data populated before sending a post to the client
- ReplyCount int64 `json:"reply_count" db:"-"`
- LastReplyAt int64 `json:"last_reply_at" db:"-"`
- Participants []*User `json:"participants" db:"-"`
- IsFollowing *bool `json:"is_following,omitempty" db:"-"` // for root posts in collapsed thread mode indicates if the current user is following this thread
- Metadata *PostMetadata `json:"metadata,omitempty" db:"-"`
+ ReplyCount int64 `json:"reply_count"`
+ LastReplyAt int64 `json:"last_reply_at"`
+ Participants []*User `json:"participants"`
+ IsFollowing *bool `json:"is_following,omitempty"` // for root posts in collapsed thread mode indicates if the current user is following this thread
+ Metadata *PostMetadata `json:"metadata,omitempty"`
}
type PostEphemeral struct {
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/role.go b/vendor/github.com/mattermost/mattermost-server/v6/model/role.go
index 0dcb405c..37edec4f 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/role.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/role.go
@@ -4,6 +4,7 @@
package model
import (
+ "fmt"
"strings"
)
@@ -42,6 +43,8 @@ func init() {
ChannelUserRoleId,
ChannelAdminRoleId,
+ CustomGroupUserRoleId,
+
PlaybookAdminRoleId,
PlaybookMemberRoleId,
RunAdminRoleId,
@@ -367,6 +370,8 @@ const (
ChannelUserRoleId = "channel_user"
ChannelAdminRoleId = "channel_admin"
+ CustomGroupUserRoleId = "custom_group_user"
+
PlaybookAdminRoleId = "playbook_admin"
PlaybookMemberRoleId = "playbook_member"
RunAdminRoleId = "run_admin"
@@ -379,6 +384,7 @@ const (
RoleScopeSystem RoleScope = "System"
RoleScopeTeam RoleScope = "Team"
RoleScopeChannel RoleScope = "Channel"
+ RoleScopeGroup RoleScope = "Group"
RoleTypeGuest RoleType = "Guest"
RoleTypeUser RoleType = "User"
@@ -683,6 +689,13 @@ func IsValidRoleName(roleName string) bool {
func MakeDefaultRoles() map[string]*Role {
roles := make(map[string]*Role)
+ roles[CustomGroupUserRoleId] = &Role{
+ Name: CustomGroupUserRoleId,
+ DisplayName: fmt.Sprintf("authentication.roles.%s.name", CustomGroupUserRoleId),
+ Description: fmt.Sprintf("authentication.roles.%s.description", CustomGroupUserRoleId),
+ Permissions: []string{},
+ }
+
roles[ChannelGuestRoleId] = &Role{
Name: "channel_guest",
DisplayName: "authentication.roles.channel_guest.name",
@@ -895,6 +908,10 @@ func MakeDefaultRoles() map[string]*Role {
PermissionCreateGroupChannel.Id,
PermissionViewMembers.Id,
PermissionCreateTeam.Id,
+ PermissionCreateCustomGroup.Id,
+ PermissionEditCustomGroup.Id,
+ PermissionDeleteCustomGroup.Id,
+ PermissionManageCustomGroupMembers.Id,
},
SchemeManaged: true,
BuiltIn: true,
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/status.go b/vendor/github.com/mattermost/mattermost-server/v6/model/status.go
index 45a6d5d2..5a5e9425 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/status.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/status.go
@@ -34,6 +34,19 @@ func (s *Status) ToJSON() ([]byte, error) {
return json.Marshal(sCopy)
}
+// The following are some GraphQL methods necessary to return the
+// data as float64. The GraphQL spec doesn't support 64-bit integers,
+// so we have to pass the data as float64. The trailing _ is a hack
+// to keep the attribute name the same in the GraphQL schema.
+
+func (s *Status) LastActivityAt_() float64 {
+ return float64(s.LastActivityAt)
+}
+
+func (s *Status) DNDEndTime_() float64 {
+ return float64(s.DNDEndTime)
+}
+
func StatusListToJSON(u []*Status) ([]byte, error) {
list := make([]Status, len(u))
for i, s := range u {
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/system.go b/vendor/github.com/mattermost/mattermost-server/v6/model/system.go
index c8bcaba2..b1b9ca19 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/system.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/system.go
@@ -30,8 +30,8 @@ const (
SystemWarnMetricNumberOfActiveUsers500 = "warn_metric_number_of_active_users_500"
SystemWarnMetricNumberOfPosts2m = "warn_metric_number_of_posts_2M"
SystemWarnMetricLastRunTimestampKey = "LastWarnMetricRunTimestamp"
- SystemMetricSupportEmailNotConfigured = "warn_metric_support_email_not_configured"
SystemFirstAdminVisitMarketplace = "FirstAdminVisitMarketplace"
+ SystemFirstAdminSetupComplete = "FirstAdminSetupComplete"
AwsMeteringReportInterval = 1
AwsMeteringDimensionUsageHrs = "UsageHrs"
UserLimitOverageCycleEndDate = "UserLimitOverageCycleEndDate"
@@ -147,13 +147,6 @@ var WarnMetricsTable = map[string]WarnMetric{
IsBotOnly: false,
IsRunOnce: true,
},
- SystemMetricSupportEmailNotConfigured: {
- Id: SystemMetricSupportEmailNotConfigured,
- Limit: -1,
- IsBotOnly: true,
- IsRunOnce: false,
- SkipAction: true,
- },
}
type WarnMetric struct {
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/team.go b/vendor/github.com/mattermost/mattermost-server/v6/model/team.go
index d18d1620..a5aa7f94 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/team.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/team.go
@@ -40,7 +40,7 @@ type Team struct {
LastTeamIconUpdate int64 `json:"last_team_icon_update,omitempty"`
SchemeId *string `json:"scheme_id"`
GroupConstrained *bool `json:"group_constrained"`
- PolicyID *string `json:"policy_id" db:"-"`
+ PolicyID *string `json:"policy_id"`
}
type TeamPatch struct {
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/team_member.go b/vendor/github.com/mattermost/mattermost-server/v6/model/team_member.go
index 70fd40c4..45a22803 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/team_member.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/team_member.go
@@ -125,3 +125,9 @@ func (o *TeamMember) PreUpdate() {
func (o *TeamMember) GetRoles() []string {
return strings.Fields(o.Roles)
}
+
+// DeleteAt_ returns the deleteAt value in float64. This is necessary to work
+// with GraphQL since it doesn't support 64 bit integers.
+func (o *TeamMember) DeleteAt_() float64 {
+ return float64(o.DeleteAt)
+}
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/thread.go b/vendor/github.com/mattermost/mattermost-server/v6/model/thread.go
index e774e87a..89985709 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/thread.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/thread.go
@@ -3,11 +3,24 @@
package model
+// Thread tracks the metadata associated with a root post and its reply posts.
+//
+// Note that Thread metadata does not exist until the first reply to a root post.
type Thread struct {
- PostId string `json:"id"`
- ChannelId string `json:"channel_id"`
- ReplyCount int64 `json:"reply_count"`
- LastReplyAt int64 `json:"last_reply_at"`
+ // PostId is the root post of the thread.
+ PostId string `json:"id"`
+
+ // ChannelId is the channel in which the thread was posted.
+ ChannelId string `json:"channel_id"`
+
+ // ReplyCount is the number of replies to the thread (excluding deleted posts).
+ ReplyCount int64 `json:"reply_count"`
+
+ // LastReplyAt is the timestamp of the most recent post to the thread.
+ LastReplyAt int64 `json:"last_reply_at"`
+
+	// Participants is a list of user ids that have replied to the thread, sorted from oldest
+	// to newest. Note that the root post author is not included in this list until they reply.
Participants StringArray `json:"participants"`
}
@@ -62,11 +75,37 @@ func (o *Thread) Etag() string {
return Etag(o.PostId, o.LastReplyAt)
}
+// ThreadMembership models the relationship between a user and a thread of posts, with a similar
+// data structure as ChannelMembership.
type ThreadMembership struct {
- PostId string `json:"post_id"`
- UserId string `json:"user_id"`
- Following bool `json:"following"`
- LastViewed int64 `json:"last_view_at"`
- LastUpdated int64 `json:"last_update_at"`
- UnreadMentions int64 `json:"unread_mentions"`
+ // PostId is the root post id of the thread in question.
+ PostId string `json:"post_id"`
+
+ // UserId is the user whose membership in the thread is being tracked.
+ UserId string `json:"user_id"`
+
+ // Following tracks whether the user is following the given thread. This defaults to true
+ // when a ThreadMembership record is created (a record doesn't exist until the user first
+ // starts following the thread), but the user can stop following or resume following at
+ // will.
+ Following bool `json:"following"`
+
+ // LastUpdated is either the creation time of the membership record, or the last time the
+ // membership record was changed (e.g. started/stopped following, viewed thread, mention
+ // count change).
+ //
+ // This field is used to constrain queries of thread memberships to those updated after
+ // a given timestamp (e.g. on websocket reconnect). It's also used as the time column for
+ // deletion decisions during any configured retention policy.
+ LastUpdated int64 `json:"last_update_at"`
+
+ // LastViewed is the last time the user viewed this thread. It is the thread analogue to
+ // the ChannelMembership's LastViewedAt and is used to decide when there are new replies
+ // for the user and where the user should start reading.
+ LastViewed int64 `json:"last_view_at"`
+
+ // UnreadMentions is the number of unseen at-mentions for the user in the given thread. It
+ // is the thread analogue to the ChannelMembership's MentionCount, and is used to highlight
+ // threads with the mention count.
+ UnreadMentions int64 `json:"unread_mentions"`
}
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/upload_session.go b/vendor/github.com/mattermost/mattermost-server/v6/model/upload_session.go
index 994e7fb3..0fb54ce6 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/upload_session.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/upload_session.go
@@ -12,8 +12,9 @@ import (
type UploadType string
const (
- UploadTypeAttachment UploadType = "attachment"
- UploadTypeImport UploadType = "import"
+ UploadTypeAttachment UploadType = "attachment"
+ UploadTypeImport UploadType = "import"
+ IncompleteUploadSuffix = ".tmp"
)
// UploadNoUserID is a "fake" user id used by the API layer when in local mode.
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/user.go b/vendor/github.com/mattermost/mattermost-server/v6/model/user.go
index 5035f9a5..698f3377 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/user.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/user.go
@@ -95,13 +95,13 @@ type User struct {
MfaActive bool `json:"mfa_active,omitempty"`
MfaSecret string `json:"mfa_secret,omitempty"`
RemoteId *string `json:"remote_id,omitempty"`
- LastActivityAt int64 `db:"-" json:"last_activity_at,omitempty"`
- IsBot bool `db:"-" json:"is_bot,omitempty"`
- BotDescription string `db:"-" json:"bot_description,omitempty"`
- BotLastIconUpdate int64 `db:"-" json:"bot_last_icon_update,omitempty"`
- TermsOfServiceId string `db:"-" json:"terms_of_service_id,omitempty"`
- TermsOfServiceCreateAt int64 `db:"-" json:"terms_of_service_create_at,omitempty"`
- DisableWelcomeEmail bool `db:"-" json:"disable_welcome_email"`
+ LastActivityAt int64 `json:"last_activity_at,omitempty"`
+ IsBot bool `json:"is_bot,omitempty"`
+ BotDescription string `json:"bot_description,omitempty"`
+ BotLastIconUpdate int64 `json:"bot_last_icon_update,omitempty"`
+ TermsOfServiceId string `json:"terms_of_service_id,omitempty"`
+ TermsOfServiceCreateAt int64 `json:"terms_of_service_create_at,omitempty"`
+ DisableWelcomeEmail bool `json:"disable_welcome_email"`
}
//msgp UserMap
@@ -409,6 +409,23 @@ func (u *User) PreSave() {
}
}
+// The following are some GraphQL methods necessary to return the
+// data as float64. The GraphQL spec doesn't support 64-bit integers,
+// so we have to pass the data as float64. The trailing _ is a hack
+// to keep the attribute name the same in the GraphQL schema.
+
+func (u *User) CreateAt_() float64 {
+ return float64(u.CreateAt)
+}
+
+func (u *User) DeleteAt_() float64 {
+ return float64(u.DeleteAt)
+}
+
+func (u *User) LastPictureUpdateAt() float64 {
+ return float64(u.LastPictureUpdate)
+}
+
// PreUpdate should be run before updating the user in the db.
func (u *User) PreUpdate() {
u.Username = SanitizeUnicode(u.Username)
@@ -630,6 +647,15 @@ func (u *User) GetCustomStatus() *CustomStatus {
return o
}
+func (u *User) CustomStatus() *CustomStatus {
+ var o *CustomStatus
+
+ data := u.Props[UserPropsKeyCustomStatus]
+ _ = json.Unmarshal([]byte(data), &o)
+
+ return o
+}
+
func (u *User) ClearCustomStatus() {
u.MakeNonNil()
u.Props[UserPropsKeyCustomStatus] = ""
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/user_get.go b/vendor/github.com/mattermost/mattermost-server/v6/model/user_get.go
index 2748d735..0ba62f3f 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/user_get.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/user_get.go
@@ -14,6 +14,8 @@ type UserGetOptions struct {
NotInChannelId string
// Filters the users in the group
InGroupId string
+ // Filters the users not in the group
+ NotInGroupId string
// Filters the users group constrained
GroupConstrained bool
// Filters the users without a team
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/user_search.go b/vendor/github.com/mattermost/mattermost-server/v6/model/user_search.go
index 93bf6009..d0480fe5 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/user_search.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/user_search.go
@@ -22,6 +22,7 @@ type UserSearch struct {
Roles []string `json:"roles"`
ChannelRoles []string `json:"channel_roles"`
TeamRoles []string `json:"team_roles"`
+ NotInGroupId string `json:"not_in_group_id"`
}
// UserSearchOptions captures internal parameters derived from the user's permissions and a
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/utils.go b/vendor/github.com/mattermost/mattermost-server/v6/model/utils.go
index c88d9100..4a6f633e 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/utils.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/utils.go
@@ -132,6 +132,24 @@ func (m StringMap) Value() (driver.Value, error) {
return string(j), err
}
+func (StringMap) ImplementsGraphQLType(name string) bool {
+ return name == "StringMap"
+}
+
+func (m StringMap) MarshalJSON() ([]byte, error) {
+ return json.Marshal((map[string]string)(m))
+}
+
+func (m *StringMap) UnmarshalGraphQL(input interface{}) error {
+ json, ok := input.(map[string]string)
+ if !ok {
+ return errors.New("wrong type")
+ }
+
+ *m = json
+ return nil
+}
+
func (si *StringInterface) Scan(value interface{}) error {
if value == nil {
return nil
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/version.go b/vendor/github.com/mattermost/mattermost-server/v6/model/version.go
index 673d4be9..144b0303 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/version.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/version.go
@@ -13,8 +13,7 @@ import (
// It should be maintained in chronological order with most current
// release at the front of the list.
var versions = []string{
- "6.4.2",
- "6.4.1",
+ "6.5.0",
"6.4.0",
"6.3.0",
"6.2.0",
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/model/websocket_message.go b/vendor/github.com/mattermost/mattermost-server/v6/model/websocket_message.go
index 8827a001..38e42bb2 100644
--- a/vendor/github.com/mattermost/mattermost-server/v6/model/websocket_message.go
+++ b/vendor/github.com/mattermost/mattermost-server/v6/model/websocket_message.go
@@ -62,6 +62,8 @@ const (
WebsocketEventReceivedGroupNotAssociatedToTeam = "received_group_not_associated_to_team"
WebsocketEventReceivedGroupAssociatedToChannel = "received_group_associated_to_channel"
WebsocketEventReceivedGroupNotAssociatedToChannel = "received_group_not_associated_to_channel"
+ WebsocketEventGroupMemberDelete = "group_member_deleted"
+ WebsocketEventGroupMemberAdd = "group_member_add"
WebsocketEventSidebarCategoryCreated = "sidebar_category_created"
WebsocketEventSidebarCategoryUpdated = "sidebar_category_updated"
WebsocketEventSidebarCategoryDeleted = "sidebar_category_deleted"
@@ -88,6 +90,9 @@ type WebsocketBroadcast struct {
TeamId string `json:"team_id"` // broadcast only occurs for users in this team
ContainsSanitizedData bool `json:"-"`
ContainsSensitiveData bool `json:"-"`
+ // ReliableClusterSend indicates whether the message should be sent
+ // through the cluster using the reliable, TCP-backed channel.
+ ReliableClusterSend bool `json:"-"`
}
func (wb *WebsocketBroadcast) copy() *WebsocketBroadcast {
diff --git a/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/graphql.go b/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/graphql.go
new file mode 100644
index 00000000..a09e0699
--- /dev/null
+++ b/vendor/github.com/mattermost/mattermost-server/v6/shared/mlog/graphql.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
+// See LICENSE.txt for license information.
+
+package mlog
+
+import (
+ "context"
+)
+
+// GraphQLLogger is used to log panics that occur during query execution.
+type GraphQLLogger struct {
+ logger *Logger
+}
+
+func NewGraphQLLogger(logger *Logger) *GraphQLLogger {
+ return &GraphQLLogger{logger: logger}
+}
+
+// LogPanic satisfies the graphql/log.Logger interface.
+// It converts the panic into an error.
+func (l *GraphQLLogger) LogPanic(_ context.Context, value interface{}) {
+ l.logger.Error("Error while executing GraphQL query", Any("error", value))
+}
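graph-gophers/graphql-go accepts a log.Logger via its graphql.Logger schema option, which is the interface LogPanic satisfies here. A wiring sketch; the schema string and resolver are illustrative, and the *mlog.Logger is assumed to come from the caller:

```go
package graphqlserver

import (
	"github.com/graph-gophers/graphql-go"

	"github.com/mattermost/mattermost-server/v6/shared/mlog"
)

const schemaString = `
	schema { query: Query }
	type Query { hello: String! }
`

type queryResolver struct{}

func (queryResolver) Hello() string { return "world" }

// newSchema builds a schema whose resolver panics are reported through mlog
// instead of the library's default logger.
func newSchema(logger *mlog.Logger) (*graphql.Schema, error) {
	return graphql.ParseSchema(
		schemaString,
		queryResolver{},
		graphql.Logger(mlog.NewGraphQLLogger(logger)),
	)
}
```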
diff --git a/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md
index 8b1ee86c..24522ef7 100644
--- a/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md
+++ b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md
@@ -1,4 +1,3 @@
-
### Developer Guidelines
``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following:
diff --git a/vendor/github.com/minio/minio-go/v7/README.md b/vendor/github.com/minio/minio-go/v7/README.md
index 3ba174f4..211dd5e8 100644
--- a/vendor/github.com/minio/minio-go/v7/README.md
+++ b/vendor/github.com/minio/minio-go/v7/README.md
@@ -8,7 +8,7 @@ This document assumes that you have a working [Go development environment](https
## Download from Github
```sh
-GO111MODULE=on go get github.com/minio/minio-go/v7
+go get github.com/minio/minio-go/v7
```
## Initialize MinIO Client
@@ -115,7 +115,6 @@ func main() {
### Run FileUploader
```sh
-export GO111MODULE=on
go run file-uploader.go
2016/08/13 17:03:28 Successfully created mymusic
2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go
index 39df7eec..dd781cae 100644
--- a/vendor/github.com/minio/minio-go/v7/api-error-response.go
+++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go
@@ -47,6 +47,7 @@ type ErrorResponse struct {
Message string
BucketName string
Key string
+ Resource string
RequestID string `xml:"RequestId"`
HostID string `xml:"HostId"`
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go
index b1291b6b..9041d99e 100644
--- a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go
+++ b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go
@@ -47,8 +47,9 @@ type AccessControlList struct {
}
type accessControlPolicy struct {
- Owner
- AccessControlList
+ XMLName xml.Name `xml:"AccessControlPolicy"`
+ Owner Owner
+ AccessControlList AccessControlList
}
// GetObjectACL get object ACLs
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go
index b9b96025..2ce4b260 100644
--- a/vendor/github.com/minio/minio-go/v7/api-get-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go
@@ -24,6 +24,7 @@ import (
"io"
"net/http"
"net/url"
+ "strconv"
"sync"
"github.com/minio/minio-go/v7/pkg/s3utils"
@@ -652,6 +653,9 @@ func (c *Client) getObject(ctx context.Context, bucketName, objectName string, o
if opts.VersionID != "" {
urlValues.Set("versionId", opts.VersionID)
}
+ if opts.PartNumber > 0 {
+ urlValues.Set("partNumber", strconv.Itoa(opts.PartNumber))
+ }
// Execute GET on objectName.
resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-options.go b/vendor/github.com/minio/minio-go/v7/api-get-options.go
index 0be858d1..184ef9f8 100644
--- a/vendor/github.com/minio/minio-go/v7/api-get-options.go
+++ b/vendor/github.com/minio/minio-go/v7/api-get-options.go
@@ -37,6 +37,7 @@ type GetObjectOptions struct {
headers map[string]string
ServerSideEncryption encrypt.ServerSide
VersionID string
+ PartNumber int
// To be not used by external applications
Internal AdvancedGetOptions
}
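The new PartNumber option is forwarded by getObject as a partNumber query parameter, letting a caller fetch a single part of a multipart object. A usage sketch; the endpoint, credentials, bucket, and object name are placeholders:

```go
package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Fetch only part 1 of a multipart object.
	obj, err := client.GetObject(context.Background(), "my-bucket", "big-object.bin",
		minio.GetObjectOptions{PartNumber: 1})
	if err != nil {
		log.Fatal(err)
	}
	defer obj.Close()

	if _, err := io.Copy(os.Stdout, obj); err != nil {
		log.Fatal(err)
	}
}
```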
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go
index fb61b407..e8a964e2 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go
@@ -214,13 +214,20 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
//
// You must have WRITE permissions on a bucket to create an object.
//
-// - For size smaller than 128MiB PutObject automatically does a
-// single atomic Put operation.
-// - For size larger than 128MiB PutObject automatically does a
-// multipart Put operation.
+// - For size smaller than 16MiB PutObject automatically does a
+// single atomic PUT operation.
+//
+// - For size larger than 16MiB PutObject automatically does a
+// multipart upload operation.
+//
// - For size input as -1 PutObject does a multipart Put operation
// until input stream reaches EOF. Maximum object size that can
// be uploaded through this operation will be 5TiB.
+//
+// WARNING: Passing a size of '-1' buffers parts in memory, and those
+// buffers cannot be reused. For best results with PutObject(), always pass the size.
+//
+// NOTE: Upon errors during upload, the multipart operation is entirely aborted.
func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
opts PutObjectOptions) (info UploadInfo, err error) {
if objectSize < 0 && opts.DisableMultipart {
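Given the 16MiB threshold described in the comment above, callers that know the object size should pass it so small uploads stay a single PUT and larger ones stream as multipart without extra buffering. A sketch, with the client assumed to be constructed elsewhere and bucket, object, and path as placeholders:

```go
package upload

import (
	"context"
	"os"

	"github.com/minio/minio-go/v7"
)

// uploadFile stats the file so PutObject receives the real size rather than -1,
// letting the library choose a single PUT (< 16MiB) or a multipart upload.
func uploadFile(ctx context.Context, client *minio.Client, bucket, object, path string) (minio.UploadInfo, error) {
	f, err := os.Open(path)
	if err != nil {
		return minio.UploadInfo{}, err
	}
	defer f.Close()

	st, err := f.Stat()
	if err != nil {
		return minio.UploadInfo{}, err
	}

	return client.PutObject(ctx, bucket, object, f, st.Size(),
		minio.PutObjectOptions{ContentType: "application/octet-stream"})
}
```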
diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go
index fd3f1e12..0fee9022 100644
--- a/vendor/github.com/minio/minio-go/v7/api-remove.go
+++ b/vendor/github.com/minio/minio-go/v7/api-remove.go
@@ -136,11 +136,11 @@ func (c *Client) RemoveObject(ctx context.Context, bucketName, objectName string
return err
}
- return c.removeObject(ctx, bucketName, objectName, opts)
+ res := c.removeObject(ctx, bucketName, objectName, opts)
+ return res.Err
}
-func (c *Client) removeObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error {
-
+func (c *Client) removeObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) RemoveObjectResult {
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
@@ -181,19 +181,25 @@ func (c *Client) removeObject(ctx context.Context, bucketName, objectName string
})
defer closeResponse(resp)
if err != nil {
- return err
+ return RemoveObjectResult{Err: err}
}
if resp != nil {
// if some unexpected error happened and max retry is reached, we want to let client know
if resp.StatusCode != http.StatusNoContent {
- return httpRespToErrorResponse(resp, bucketName, objectName)
+ err := httpRespToErrorResponse(resp, bucketName, objectName)
+ return RemoveObjectResult{Err: err}
}
}
// DeleteObject always responds with http '204' even for
// objects which do not exist. So no need to handle them
// specifically.
- return nil
+ return RemoveObjectResult{
+ ObjectName: objectName,
+ ObjectVersionID: opts.VersionID,
+ DeleteMarker: resp.Header.Get("x-amz-delete-marker") == "true",
+ DeleteMarkerVersionID: resp.Header.Get("x-amz-version-id"),
+ }
}
// RemoveObjectError - container of Multi Delete S3 API error
@@ -203,6 +209,17 @@ type RemoveObjectError struct {
Err error
}
+// RemoveObjectResult - container of Multi Delete S3 API result
+type RemoveObjectResult struct {
+ ObjectName string
+ ObjectVersionID string
+
+ DeleteMarker bool
+ DeleteMarkerVersionID string
+
+ Err error
+}
+
// generateRemoveMultiObjects - generate the XML request for remove multi objects request
func generateRemoveMultiObjectsRequest(objects []ObjectInfo) []byte {
delObjects := []deleteObject{}
@@ -212,21 +229,32 @@ func generateRemoveMultiObjectsRequest(objects []ObjectInfo) []byte {
VersionID: obj.VersionID,
})
}
- xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: delObjects, Quiet: true})
+ xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: delObjects, Quiet: false})
return xmlBytes
}
// processRemoveMultiObjectsResponse - parse the remove multi objects web service
// and return the success/failure result status for each object
-func processRemoveMultiObjectsResponse(body io.Reader, objects []ObjectInfo, errorCh chan<- RemoveObjectError) {
+func processRemoveMultiObjectsResponse(body io.Reader, objects []ObjectInfo, resultCh chan<- RemoveObjectResult) {
// Parse multi delete XML response
rmResult := &deleteMultiObjectsResult{}
err := xmlDecoder(body, rmResult)
if err != nil {
- errorCh <- RemoveObjectError{ObjectName: "", Err: err}
+ resultCh <- RemoveObjectResult{ObjectName: "", Err: err}
return
}
+ // Fill in deletions that returned success
+ for _, obj := range rmResult.DeletedObjects {
+ resultCh <- RemoveObjectResult{
+ ObjectName: obj.Key,
+ // Only filled with versioned buckets
+ ObjectVersionID: obj.VersionID,
+ DeleteMarker: obj.DeleteMarker,
+ DeleteMarkerVersionID: obj.DeleteMarkerVersionID,
+ }
+ }
+
// Fill deletion that returned an error.
for _, obj := range rmResult.UnDeletedObjects {
// Version does not exist is not an error ignore and continue.
@@ -234,9 +262,9 @@ func processRemoveMultiObjectsResponse(body io.Reader, objects []ObjectInfo, err
case "InvalidArgument", "NoSuchVersion":
continue
}
- errorCh <- RemoveObjectError{
- ObjectName: obj.Key,
- VersionID: obj.VersionID,
+ resultCh <- RemoveObjectResult{
+ ObjectName: obj.Key,
+ ObjectVersionID: obj.VersionID,
Err: ErrorResponse{
Code: obj.Code,
Message: obj.Message,
@@ -273,10 +301,54 @@ func (c *Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh
return errorCh
}
- go c.removeObjects(ctx, bucketName, objectsCh, errorCh, opts)
+ resultCh := make(chan RemoveObjectResult, 1)
+ go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts)
+ go func() {
+ defer close(errorCh)
+ for res := range resultCh {
+ // Send only errors to the error channel
+ if res.Err == nil {
+ continue
+ }
+ errorCh <- RemoveObjectError{
+ ObjectName: res.ObjectName,
+ VersionID: res.ObjectVersionID,
+ Err: res.Err,
+ }
+ }
+ }()
+
return errorCh
}
+// RemoveObjectsWithResult removes multiple objects from a bucket; object
+// versions may be specified via the ObjectInfo entries received from
+// objectsCh. Removal results, both successes and failures, are sent back
+// on the returned RemoveObjectResult channel.
+func (c *Client) RemoveObjectsWithResult(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectResult {
+ resultCh := make(chan RemoveObjectResult, 1)
+
+ // Validate if bucket name is valid.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ defer close(resultCh)
+ resultCh <- RemoveObjectResult{
+ Err: err,
+ }
+ return resultCh
+ }
+ // Validate objects channel to be properly allocated.
+ if objectsCh == nil {
+ defer close(resultCh)
+ resultCh <- RemoveObjectResult{
+ Err: errInvalidArgument("Objects channel cannot be nil"),
+ }
+ return resultCh
+ }
+
+ go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts)
+ return resultCh
+}
+
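A consumption sketch for the new API: feed ObjectInfo entries on a channel and range over the returned results, which report successes (including any delete marker created on a versioned bucket) as well as failures. The bucket and keys are placeholders:

```go
package example

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
)

// removeAll deletes the given keys and logs a per-object result,
// including delete markers created on versioned buckets.
func removeAll(ctx context.Context, client *minio.Client, bucket string, keys []string) {
	objectsCh := make(chan minio.ObjectInfo)
	go func() {
		defer close(objectsCh)
		for _, key := range keys {
			objectsCh <- minio.ObjectInfo{Key: key}
		}
	}()

	for res := range client.RemoveObjectsWithResult(ctx, bucket, objectsCh, minio.RemoveObjectsOptions{}) {
		if res.Err != nil {
			log.Printf("failed to remove %s: %v", res.ObjectName, res.Err)
			continue
		}
		log.Printf("removed %s (delete marker: %v, marker version: %s)",
			res.ObjectName, res.DeleteMarker, res.DeleteMarkerVersionID)
	}
}
```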
// Return true if the character is within the allowed characters in an XML 1.0 document
// The list of allowed characters can be found here: https://www.w3.org/TR/xml/#charsets
func validXMLChar(r rune) (ok bool) {
@@ -298,14 +370,14 @@ func hasInvalidXMLChar(str string) bool {
}
// Generate and call MultiDelete S3 requests based on entries received from objectsCh
-func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, errorCh chan<- RemoveObjectError, opts RemoveObjectsOptions) {
+func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, resultCh chan<- RemoveObjectResult, opts RemoveObjectsOptions) {
maxEntries := 1000
finish := false
urlValues := make(url.Values)
urlValues.Set("delete", "")
- // Close error channel when Multi delete finishes.
- defer close(errorCh)
+ // Close result channel when Multi delete finishes.
+ defer close(resultCh)
// Loop over entries by 1000 and call MultiDelete requests
for {
@@ -319,22 +391,20 @@ func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh
for object := range objectsCh {
if hasInvalidXMLChar(object.Key) {
// Use single DELETE so the object name will be in the request URL instead of the multi-delete XML document.
- err := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{
+ removeResult := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{
VersionID: object.VersionID,
GovernanceBypass: opts.GovernanceBypass,
})
- if err != nil {
+ if err := removeResult.Err; err != nil {
// Version does not exist is not an error ignore and continue.
switch ToErrorResponse(err).Code {
case "InvalidArgument", "NoSuchVersion":
continue
}
- errorCh <- RemoveObjectError{
- ObjectName: object.Key,
- VersionID: object.VersionID,
- Err: err,
- }
+ resultCh <- removeResult
}
+
+ resultCh <- removeResult
continue
}
@@ -374,22 +444,22 @@ func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh
if resp != nil {
if resp.StatusCode != http.StatusOK {
e := httpRespToErrorResponse(resp, bucketName, "")
- errorCh <- RemoveObjectError{ObjectName: "", Err: e}
+ resultCh <- RemoveObjectResult{ObjectName: "", Err: e}
}
}
if err != nil {
for _, b := range batch {
- errorCh <- RemoveObjectError{
- ObjectName: b.Key,
- VersionID: b.VersionID,
- Err: err,
+ resultCh <- RemoveObjectResult{
+ ObjectName: b.Key,
+ ObjectVersionID: b.VersionID,
+ Err: err,
}
}
continue
}
// Process multiobjects remove xml response
- processRemoveMultiObjectsResponse(resp.Body, batch, errorCh)
+ processRemoveMultiObjectsResponse(resp.Body, batch, resultCh)
closeResponse(resp)
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
index 948f8a74..592d4cdc 100644
--- a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
+++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
@@ -335,7 +335,7 @@ type deletedObject struct {
VersionID string `xml:"VersionId,omitempty"`
// These fields are ignored.
DeleteMarker bool
- DeleteMarkerVersionID string
+ DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
}
// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
index b5eaa690..357cd1b4 100644
--- a/vendor/github.com/minio/minio-go/v7/api.go
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -111,7 +111,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "v7.0.16"
+ libraryVersion = "v7.0.21"
)
// User Agent should always following the below style.
@@ -182,67 +182,6 @@ func (r *lockedRandSource) Seed(seed int64) {
r.lk.Unlock()
}
-// Redirect requests by re signing the request.
-func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error {
- if len(via) >= 5 {
- return errors.New("stopped after 5 redirects")
- }
- if len(via) == 0 {
- return nil
- }
- lastRequest := via[len(via)-1]
- var reAuth bool
- for attr, val := range lastRequest.Header {
- // if hosts do not match do not copy Authorization header
- if attr == "Authorization" && req.Host != lastRequest.Host {
- reAuth = true
- continue
- }
- if _, ok := req.Header[attr]; !ok {
- req.Header[attr] = val
- }
- }
-
- *c.endpointURL = *req.URL
-
- value, err := c.credsProvider.Get()
- if err != nil {
- return err
- }
- var (
- signerType = value.SignerType
- accessKeyID = value.AccessKeyID
- secretAccessKey = value.SecretAccessKey
- sessionToken = value.SessionToken
- region = c.region
- )
-
- // Custom signer set then override the behavior.
- if c.overrideSignerType != credentials.SignatureDefault {
- signerType = c.overrideSignerType
- }
-
- // If signerType returned by credentials helper is anonymous,
- // then do not sign regardless of signerType override.
- if value.SignerType == credentials.SignatureAnonymous {
- signerType = credentials.SignatureAnonymous
- }
-
- if reAuth {
- // Check if there is no region override, if not get it from the URL if possible.
- if region == "" {
- region = s3utils.GetRegionFromURL(*c.endpointURL)
- }
- switch {
- case signerType.IsV2():
- return errors.New("signature V2 cannot support redirection")
- case signerType.IsV4():
- signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region))
- }
- }
- return nil
-}
-
func privateNew(endpoint string, opts *Options) (*Client, error) {
// construct endpoint.
endpointURL, err := getEndpointURL(endpoint, opts.Secure)
@@ -279,9 +218,11 @@ func privateNew(endpoint string, opts *Options) (*Client, error) {
// Instantiate http client and bucket location cache.
clnt.httpClient = &http.Client{
- Jar: jar,
- Transport: transport,
- CheckRedirect: clnt.redirectHeaders,
+ Jar: jar,
+ Transport: transport,
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
}
// Sets custom region, if region is empty bucket location cache is used automatically.
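Returning http.ErrUseLastResponse from CheckRedirect is the standard library's way to stop following redirects: the client hands the 3xx response back to the caller unmodified instead of re-signing and chasing it. The idiom in isolation:

```go
package example

import "net/http"

// newNoRedirectClient returns an *http.Client that surfaces 3xx responses
// to the caller instead of following them automatically.
func newNoRedirectClient() *http.Client {
	return &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}
}
```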
@@ -917,8 +858,8 @@ func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, is
// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
host = c.s3AccelerateEndpoint
} else {
- // Do not change the host if the endpoint URL is a FIPS S3 endpoint.
- if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) {
+ // Do not change the host if the endpoint URL is a FIPS S3 endpoint or a S3 PrivateLink interface endpoint
+ if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) && !s3utils.IsAmazonPrivateLinkEndpoint(*c.endpointURL) {
// Fetch new host based on the bucket location.
host = getS3Endpoint(bucketLocation)
}
diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go
index b8950dd2..413b63e5 100644
--- a/vendor/github.com/minio/minio-go/v7/functional_tests.go
+++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go
@@ -1,3 +1,4 @@
+//go:build mint
// +build mint
/*
@@ -2627,6 +2628,138 @@ func testRemoveMultipleObjects() {
successLogger(testName, function, args, startTime).Info()
}
+// Test removing multiple objects and check for results
+func testRemoveMultipleObjectsWithResult() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "RemoveObjects(bucketName, objectsCh)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0")
+
+ // Enable tracing, write to stdout.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupVersionedBucket(bucketName, c)
+
+ r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
+
+ nrObjects := 10
+ nrLockedObjects := 5
+
+ objectsCh := make(chan minio.ObjectInfo)
+
+ go func() {
+ defer close(objectsCh)
+ // Upload objects and send them to objectsCh
+ for i := 0; i < nrObjects; i++ {
+ objectName := "sample" + strconv.Itoa(i) + ".txt"
+ info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8,
+ minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ if i < nrLockedObjects {
+ // t := time.Date(2130, time.April, 25, 14, 0, 0, 0, time.UTC)
+ t := time.Now().Add(5 * time.Minute)
+ m := minio.RetentionMode(minio.Governance)
+ opts := minio.PutObjectRetentionOptions{
+ GovernanceBypass: false,
+ RetainUntilDate: &t,
+ Mode: &m,
+ VersionID: info.VersionID,
+ }
+ err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Error setting retention", err)
+ return
+ }
+ }
+
+ objectsCh <- minio.ObjectInfo{
+ Key: info.Key,
+ VersionID: info.VersionID,
+ }
+ }
+ }()
+
+ // Call RemoveObjects API
+ resultCh := c.RemoveObjectsWithResult(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{})
+
+ var foundNil, foundErr int
+
+ for {
+ // Read results from resultCh until it is closed
+ select {
+ case deleteRes, ok := <-resultCh:
+ if !ok {
+ goto out
+ }
+ if deleteRes.ObjectName == "" {
+ logError(testName, function, args, startTime, "", "Unexpected object name", nil)
+ return
+ }
+ if deleteRes.ObjectVersionID == "" {
+ logError(testName, function, args, startTime, "", "Unexpected object version ID", nil)
+ return
+ }
+
+ if deleteRes.Err == nil {
+ foundNil++
+ } else {
+ foundErr++
+ }
+ }
+ }
+out:
+ if foundNil+foundErr != nrObjects {
+ logError(testName, function, args, startTime, "", "Unexpected number of results", nil)
+ return
+ }
+
+ if foundNil != nrObjects-nrLockedObjects {
+ logError(testName, function, args, startTime, "", "Unexpected number of nil errors", nil)
+ return
+ }
+
+ if foundErr != nrLockedObjects {
+ logError(testName, function, args, startTime, "", "Unexpected number of errors", nil)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
// Tests FPutObject of a big file to trigger multipart
func testFPutObjectMultipart() {
// initialize logging params
@@ -11297,12 +11430,6 @@ func testGetObjectACLContext() {
// Seed random based on current time.
rand.Seed(time.Now().Unix())
- // skipping region functional tests for non s3 runs
- if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
- ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
- return
- }
-
// Instantiate new minio client object.
c, err := minio.New(os.Getenv(serverEndpoint),
&minio.Options{
@@ -11379,6 +11506,17 @@ func testGetObjectACLContext() {
return
}
+ // Do only very limited testing if this is not AWS S3
+ if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
+ if s[0] != "private" {
+ logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"private\" but got"+fmt.Sprintf("%q", s[0]), nil)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+ return
+ }
+
if s[0] != "public-read-write" {
logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"public-read-write\" but got"+fmt.Sprintf("%q", s[0]), nil)
return
@@ -11978,6 +12116,7 @@ func main() {
// Default to KMS tests.
kms = true
}
+
// execute tests
if isFullMode() {
testMakeBucketErrorV2()
@@ -12009,6 +12148,7 @@ func main() {
testGetObjectClosedTwice()
testGetObjectS3Zip()
testRemoveMultipleObjects()
+ testRemoveMultipleObjectsWithResult()
testFPutObjectMultipart()
testFPutObject()
testGetObjectReadSeekFunctional()
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
index 3b1b547b..107a11b1 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go
@@ -18,6 +18,7 @@
package credentials
import (
+ "bytes"
"encoding/hex"
"encoding/xml"
"errors"
@@ -184,11 +185,26 @@ func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssume
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
- return AssumeRoleResponse{}, errors.New(resp.Status)
+ var errResp ErrorResponse
+ buf, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return AssumeRoleResponse{}, err
+ }
+ _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+ if err != nil {
+ var s3Err Error
+ if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+ return AssumeRoleResponse{}, err
+ }
+ errResp.RequestID = s3Err.RequestID
+ errResp.STSError.Code = s3Err.Code
+ errResp.STSError.Message = s3Err.Message
+ }
+ return AssumeRoleResponse{}, errResp
}
a := AssumeRoleResponse{}
- if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil {
+ if _, err = xmlDecodeAndBody(resp.Body, &a); err != nil {
return AssumeRoleResponse{}, err
}
return a, nil
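With this change a non-200 STS response surfaces as the typed credentials.ErrorResponse (or the S3-style Error fallback) rather than a bare status string, so callers can branch on the STS code. A sketch, assuming the NewSTSAssumeRole constructor and the option fields shown; the endpoint and keys are placeholders:

```go
package example

import (
	"errors"
	"log"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

// describeSTSError shows how the typed STS error can be inspected.
func describeSTSError() {
	creds, err := credentials.NewSTSAssumeRole("https://sts.example.com", credentials.STSAssumeRoleOptions{
		AccessKey: "ACCESS-KEY",
		SecretKey: "SECRET-KEY",
	})
	if err != nil {
		log.Fatal(err)
	}

	if _, err := creds.Get(); err != nil {
		var stsErr credentials.ErrorResponse
		if errors.As(err, &stsErr) {
			log.Printf("STS error %s: %s (request id %s)",
				stsErr.STSError.Code, stsErr.STSError.Message, stsErr.RequestID)
			return
		}
		log.Printf("other error: %v", err)
	}
}
```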
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
new file mode 100644
index 00000000..f4b027a4
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
@@ -0,0 +1,96 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2021 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+)
+
+// ErrorResponse - Is the typed error returned.
+// ErrorResponse struct should be comparable since it is compared inside
+// golang http API (https://github.com/golang/go/issues/29768)
+type ErrorResponse struct {
+ XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ ErrorResponse" json:"-"`
+ STSError struct {
+ Type string `xml:"Type"`
+ Code string `xml:"Code"`
+ Message string `xml:"Message"`
+ } `xml:"Error"`
+ RequestID string `xml:"RequestId"`
+}
+
+// Error - Is the typed error returned by all API operations.
+type Error struct {
+ XMLName xml.Name `xml:"Error" json:"-"`
+ Code string
+ Message string
+ BucketName string
+ Key string
+ Resource string
+ RequestID string `xml:"RequestId"`
+ HostID string `xml:"HostId"`
+
+ // Region where the bucket is located. This header is returned
+ // only in HEAD bucket and ListObjects response.
+ Region string
+
+ // Captures the server string returned in response header.
+ Server string
+
+ // Underlying HTTP status code for the returned error
+ StatusCode int `xml:"-" json:"-"`
+}
+
+// Error - Returns S3 error string.
+func (e Error) Error() string {
+ if e.Message == "" {
+ return fmt.Sprintf("Error response code %s.", e.Code)
+ }
+ return e.Message
+}
+
+// Error - Returns STS error string.
+func (e ErrorResponse) Error() string {
+ if e.STSError.Message == "" {
+ return fmt.Sprintf("Error response code %s.", e.STSError.Code)
+ }
+ return e.STSError.Message
+}
+
+// xmlDecoder provides the decoded value from XML.
+func xmlDecoder(body io.Reader, v interface{}) error {
+ d := xml.NewDecoder(body)
+ return d.Decode(v)
+}
+
+// xmlDecodeAndBody reads the whole body up to 1MB and
+// tries to XML decode it into v.
+// The body that was read, along with any error from reading or decoding, is returned.
+func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
+ // read the whole body (up to 1MB)
+ const maxBodyLength = 1 << 20
+ body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
+ if err != nil {
+ return nil, err
+ }
+ return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v)
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature-type.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go
index b7943330..b7943330 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature-type.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
index b79f920f..b6712b19 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
@@ -18,9 +18,11 @@
package credentials
import (
+ "bytes"
"encoding/xml"
"errors"
"fmt"
+ "io/ioutil"
"net/http"
"net/url"
"time"
@@ -132,7 +134,23 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string,
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
- return AssumeRoleWithClientGrantsResponse{}, errors.New(resp.Status)
+ var errResp ErrorResponse
+ buf, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+
+ }
+ _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+ if err != nil {
+ var s3Err Error
+ if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+ errResp.RequestID = s3Err.RequestID
+ errResp.STSError.Code = s3Err.Code
+ errResp.STSError.Message = s3Err.Message
+ }
+ return AssumeRoleWithClientGrantsResponse{}, errResp
}
a := AssumeRoleWithClientGrantsResponse{}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
index bdde1fa3..39c7892b 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go
@@ -18,9 +18,10 @@
package credentials
import (
+ "bytes"
"encoding/xml"
- "errors"
"fmt"
+ "io/ioutil"
"net/http"
"net/url"
"time"
@@ -169,7 +170,23 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
- return value, errors.New(resp.Status)
+ var errResp ErrorResponse
+ buf, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return value, err
+
+ }
+ _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+ if err != nil {
+ var s3Err Error
+ if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+ return value, err
+ }
+ errResp.RequestID = s3Err.RequestID
+ errResp.STSError.Code = s3Err.Code
+ errResp.STSError.Message = s3Err.Message
+ }
+ return value, errResp
}
r := AssumeRoleWithLDAPResponse{}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts-tls-identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
index 2e37025a..7f485d63 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts-tls-identity.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
@@ -16,10 +16,12 @@
package credentials
import (
+ "bytes"
"crypto/tls"
"encoding/xml"
"errors"
"io"
+ "io/ioutil"
"net"
"net/http"
"net/url"
@@ -149,7 +151,23 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) {
defer resp.Body.Close()
}
if resp.StatusCode != http.StatusOK {
- return Value{}, errors.New(resp.Status)
+ var errResp ErrorResponse
+ buf, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return Value{}, err
+
+ }
+ _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+ if err != nil {
+ var s3Err Error
+ if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+ return Value{}, err
+ }
+ errResp.RequestID = s3Err.RequestID
+ errResp.STSError.Code = s3Err.Code
+ errResp.STSError.Message = s3Err.Message
+ }
+ return Value{}, errResp
}
const MaxSize = 10 * 1 << 20
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
index 25ca751d..98f6ea65 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
@@ -18,9 +18,11 @@
package credentials
import (
+ "bytes"
"encoding/xml"
"errors"
"fmt"
+ "io/ioutil"
"net/http"
"net/url"
"strconv"
@@ -150,7 +152,23 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
- return AssumeRoleWithWebIdentityResponse{}, errors.New(resp.Status)
+ var errResp ErrorResponse
+ buf, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return AssumeRoleWithWebIdentityResponse{}, err
+
+ }
+ _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+ if err != nil {
+ var s3Err Error
+ if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+ return AssumeRoleWithWebIdentityResponse{}, err
+ }
+ errResp.RequestID = s3Err.RequestID
+ errResp.STSError.Code = s3Err.Code
+ errResp.STSError.Message = s3Err.Message
+ }
+ return AssumeRoleWithWebIdentityResponse{}, errResp
}
a := AssumeRoleWithWebIdentityResponse{}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
index 96f1101c..743d8eca 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
@@ -53,12 +53,12 @@ func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.Sta
// (or suspended) to request server delete noncurrent object versions at a
// specific period in the object's lifetime.
type NoncurrentVersionExpiration struct {
- XMLName xml.Name `xml:"NoncurrentVersionExpiration" json:"-"`
- NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty"`
- MaxNoncurrentVersions int `xml:"MaxNoncurrentVersions,omitempty"`
+ XMLName xml.Name `xml:"NoncurrentVersionExpiration" json:"-"`
+ NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty"`
+ NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty"`
}
-// MarshalXML if non-current days not set to non zero value
+// MarshalXML marshals n only if it is non-empty, i.e. it has a non-zero NoncurrentDays or NewerNoncurrentVersions.
func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if n.isNull() {
return nil
@@ -73,16 +73,17 @@ func (n NoncurrentVersionExpiration) IsDaysNull() bool {
}
func (n NoncurrentVersionExpiration) isNull() bool {
- return n.IsDaysNull() && n.MaxNoncurrentVersions == 0
+ return n.IsDaysNull() && n.NewerNoncurrentVersions == 0
}
// NoncurrentVersionTransition structure, set this action to request server to
// transition noncurrent object versions to different set storage classes
// at a specific period in the object's lifetime.
type NoncurrentVersionTransition struct {
- XMLName xml.Name `xml:"NoncurrentVersionTransition,omitempty" json:"-"`
- StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
- NoncurrentDays ExpirationDays `xml:"NoncurrentDays" json:"NoncurrentDays"`
+ XMLName xml.Name `xml:"NoncurrentVersionTransition,omitempty" json:"-"`
+ StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
+ NoncurrentDays ExpirationDays `xml:"NoncurrentDays" json:"NoncurrentDays"`
+ NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"`
}
// IsDaysNull returns true if days field is null
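With the field renamed to NewerNoncurrentVersions, a lifecycle rule can cap how many noncurrent versions are retained in addition to expiring them by age. A configuration sketch, assuming lifecycle.NewConfiguration and the client's SetBucketLifecycle method; the rule ID and limits are illustrative:

```go
package example

import (
	"context"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/lifecycle"
)

// expireOldVersions keeps at most 5 noncurrent versions and expires
// noncurrent versions older than 30 days.
func expireOldVersions(ctx context.Context, client *minio.Client, bucket string) error {
	cfg := lifecycle.NewConfiguration()
	cfg.Rules = []lifecycle.Rule{{
		ID:     "expire-noncurrent",
		Status: "Enabled",
		NoncurrentVersionExpiration: lifecycle.NoncurrentVersionExpiration{
			NoncurrentDays:          30,
			NewerNoncurrentVersions: 5,
		},
	}}
	return client.SetBucketLifecycle(ctx, bucket, cfg)
}
```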
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
index 44945464..2f1a5a65 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go
@@ -104,6 +104,9 @@ var elbAmazonRegex = regexp.MustCompile(`elb(.*?).amazonaws.com$`)
// Regular expression used to determine if the arg is elb host in china.
var elbAmazonCnRegex = regexp.MustCompile(`elb(.*?).amazonaws.com.cn$`)
+// amazonS3HostPrivateLink - regular expression used to determine if an arg is an S3 host in AWS PrivateLink interface-endpoint style
+var amazonS3HostPrivateLink = regexp.MustCompile(`^(?:bucket|accesspoint).vpce-.*?.s3.(.*?).vpce.amazonaws.com$`)
+
// GetRegionFromURL - returns a region from url host.
func GetRegionFromURL(endpointURL url.URL) string {
if endpointURL == sentinelURL {
@@ -139,6 +142,10 @@ func GetRegionFromURL(endpointURL url.URL) string {
if len(parts) > 1 {
return parts[1]
}
+ parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
return ""
}
@@ -202,6 +209,15 @@ func IsAmazonFIPSEndpoint(endpointURL url.URL) bool {
return IsAmazonFIPSUSEastWestEndpoint(endpointURL) || IsAmazonFIPSGovCloudEndpoint(endpointURL)
}
+// IsAmazonPrivateLinkEndpoint - Match if it is exactly Amazon S3 PrivateLink interface endpoint
+// See https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html.
+func IsAmazonPrivateLinkEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return amazonS3HostPrivateLink.MatchString(endpointURL.Host)
+}
+
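A quick check of the two helpers against an interface-endpoint-style host; the VPCE hostname is made up but follows the bucket.vpce-*.s3.<region>.vpce.amazonaws.com shape the regexp matches:

```go
package main

import (
	"fmt"
	"log"
	"net/url"

	"github.com/minio/minio-go/v7/pkg/s3utils"
)

func main() {
	// Illustrative PrivateLink interface endpoint host (bucket-style).
	u, err := url.Parse("https://bucket.vpce-0123456789abcdef0-abcdefgh.s3.us-west-2.vpce.amazonaws.com")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(s3utils.IsAmazonPrivateLinkEndpoint(*u)) // should print: true
	fmt.Println(s3utils.GetRegionFromURL(*u))            // should print: us-west-2
}
```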
// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
func IsGoogleEndpoint(endpointURL url.URL) bool {
if endpointURL == sentinelURL {
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go
index b6ea78f7..cf7921d1 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go
@@ -243,10 +243,14 @@ func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign
// Whitelist resource list that will be used in query string for signature-V2 calculation.
-// The list should be alphabetically sorted
+//
+// This list should be kept alphabetically sorted; do not edit it hastily.
var resourceList = []string{
"acl",
+ "cors",
"delete",
+ "encryption",
+ "legal-hold",
"lifecycle",
"location",
"logging",
@@ -261,6 +265,10 @@ var resourceList = []string{
"response-content-language",
"response-content-type",
"response-expires",
+ "retention",
+ "select",
+ "select-type",
+ "tagging",
"torrent",
"uploadId",
"uploads",
diff --git a/vendor/github.com/minio/minio-go/v7/transport.go b/vendor/github.com/minio/minio-go/v7/transport.go
index d5ad15b8..a88477b7 100644
--- a/vendor/github.com/minio/minio-go/v7/transport.go
+++ b/vendor/github.com/minio/minio-go/v7/transport.go
@@ -1,3 +1,4 @@
+//go:build go1.7 || go1.8
// +build go1.7 go1.8
/*
diff --git a/vendor/github.com/opentracing/opentracing-go/.gitignore b/vendor/github.com/opentracing/opentracing-go/.gitignore
new file mode 100644
index 00000000..c57100a5
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/.gitignore
@@ -0,0 +1 @@
+coverage.txt
diff --git a/vendor/github.com/opentracing/opentracing-go/.travis.yml b/vendor/github.com/opentracing/opentracing-go/.travis.yml
new file mode 100644
index 00000000..b950e429
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/.travis.yml
@@ -0,0 +1,20 @@
+language: go
+
+matrix:
+ include:
+ - go: "1.13.x"
+ - go: "1.14.x"
+ - go: "tip"
+ env:
+ - LINT=true
+ - COVERAGE=true
+
+install:
+ - if [ "$LINT" == true ]; then go get -u golang.org/x/lint/golint/... ; else echo 'skipping lint'; fi
+ - go get -u github.com/stretchr/testify/...
+
+script:
+ - make test
+ - go build ./...
+ - if [ "$LINT" == true ]; then make lint ; else echo 'skipping lint'; fi
+ - if [ "$COVERAGE" == true ]; then make cover && bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi
diff --git a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md
new file mode 100644
index 00000000..d3bfcf62
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md
@@ -0,0 +1,63 @@
+Changes by Version
+==================
+
+
+1.2.0 (2020-07-01)
+-------------------
+
+* Restore the ability to reset the current span in context to nil (#231) -- Yuri Shkuro
+* Use error.object per OpenTracing Semantic Conventions (#179) -- Rahman Syed
+* Convert nil pointer log field value to string "nil" (#230) -- Cyril Tovena
+* Add Go module support (#215) -- Zaba505
+* Make SetTag helper types in ext public (#229) -- Blake Edwards
+* Add log/fields helpers for keys from specification (#226) -- Dmitry Monakhov
+* Improve noop implementation (#223) -- chanxuehong
+* Add an extension to Tracer interface for custom go context creation (#220) -- Krzesimir Nowak
+* Fix typo in comments (#222) -- meteorlxy
+* Improve documentation for log.Object() to emphasize the requirement to pass immutable arguments (#219) -- 疯狂的小企鹅
+* [mock] Return ErrInvalidSpanContext if span context is not MockSpanContext (#216) -- Milad Irannejad
+
+
+1.1.0 (2019-03-23)
+-------------------
+
+Notable changes:
+- The library is now released under Apache 2.0 license
+- Use Set() instead of Add() in HTTPHeadersCarrier is functionally a breaking change (fixes issue [#159](https://github.com/opentracing/opentracing-go/issues/159))
+- 'golang.org/x/net/context' is replaced with 'context' from the standard library
+
+List of all changes:
+
+- Export StartSpanFromContextWithTracer (#214) <Aaron Delaney>
+- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201) <Mike Goldsmith>
+- Use Set() instead of Add() in HTTPHeadersCarrier (#191) <jeremyxu2010>
+- Update license to Apache 2.0 (#181) <Andrea Kao>
+- Replace 'golang.org/x/net/context' with 'context' (#176) <Tony Ghita>
+- Port of Python opentracing/harness/api_check.py to Go (#146) <chris erway>
+- Fix race condition in MockSpan.Context() (#170) <Brad>
+- Add PeerHostIPv4.SetString() (#155) <NeoCN>
+- Add a Noop log field type to log to allow for optional fields (#150) <Matt Ho>
+
+
+1.0.2 (2017-04-26)
+-------------------
+
+- Add more semantic tags (#139) <Rustam Zagirov>
+
+
+1.0.1 (2017-02-06)
+-------------------
+
+- Correct spelling in comments <Ben Sigelman>
+- Address race in nextMockID() (#123) <bill fumerola>
+- log: avoid panic marshaling nil error (#131) <Anthony Voutas>
+- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128) <Yuri Shkuro>
+- Drop Go 1.5 that fails in Travis (#129) <Yuri Shkuro>
+- Add convenience methods Key() and Value() to log.Field <Ben Sigelman>
+- Add convenience methods to log.Field (2 years, 6 months ago) <Radu Berinde>
+
+1.0.0 (2016-09-26)
+-------------------
+
+- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec)
+
diff --git a/vendor/github.com/opentracing/opentracing-go/LICENSE b/vendor/github.com/opentracing/opentracing-go/LICENSE
new file mode 100644
index 00000000..f0027349
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016 The OpenTracing Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/opentracing/opentracing-go/Makefile b/vendor/github.com/opentracing/opentracing-go/Makefile
new file mode 100644
index 00000000..62abb63f
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/Makefile
@@ -0,0 +1,20 @@
+.DEFAULT_GOAL := test-and-lint
+
+.PHONY: test-and-lint
+test-and-lint: test lint
+
+.PHONY: test
+test:
+ go test -v -cover -race ./...
+
+.PHONY: cover
+cover:
+ go test -v -coverprofile=coverage.txt -covermode=atomic -race ./...
+
+.PHONY: lint
+lint:
+ go fmt ./...
+ golint ./...
+ @# Run again with magic to exit non-zero if golint outputs anything.
+ @! (golint ./... | read dummy)
+ go vet ./...
diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md
new file mode 100644
index 00000000..6ef1d7c9
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/README.md
@@ -0,0 +1,171 @@
+[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go)
+[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge)
+
+# OpenTracing API for Go
+
+This package is a Go platform API for OpenTracing.
+
+## Required Reading
+
+To understand the Go platform API, one must first be familiar with the
+[OpenTracing project](https://opentracing.io) and, more specifically, its
+[terminology](https://opentracing.io/specification/).
+
+## API overview for those adding instrumentation
+
+Everyday consumers of this `opentracing` package really only need to worry
+about a few key abstractions: the `StartSpan` function, the `Span`
+interface, and binding a `Tracer` at `main()`-time. Here are code snippets
+demonstrating some important use cases.
+
+#### Singleton initialization
+
+The simplest starting point is the global tracer in `./globaltracer.go`. As early as possible, call
+
+```go
+ import "github.com/opentracing/opentracing-go"
+ import ".../some_tracing_impl"
+
+ func main() {
+ opentracing.SetGlobalTracer(
+ // tracing impl specific:
+ some_tracing_impl.New(...),
+ )
+ ...
+ }
+```
+
+#### Non-Singleton initialization
+
+If you prefer direct control to singletons, manage ownership of the
+`opentracing.Tracer` implementation explicitly.
+
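+As a minimal, illustrative sketch (not part of the upstream examples; `Service`,
+`DoWork`, and `doSomething` are hypothetical names), a component can hold its own
+`Tracer` and start spans with `StartSpanFromContextWithTracer`:
+
+```go
+    type Service struct {
+        tracer opentracing.Tracer
+    }
+
+    func (s *Service) DoWork(ctx context.Context) {
+        // Start the span from the explicitly-owned tracer rather than the
+        // global singleton.
+        span, ctx := opentracing.StartSpanFromContextWithTracer(ctx, s.tracer, "DoWork")
+        defer span.Finish()
+        doSomething(ctx)
+    }
+```
+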
+#### Creating a Span given an existing Go `context.Context`
+
+If you use `context.Context` in your application, OpenTracing's Go library will
+happily rely on it for `Span` propagation. To start a new (blocking child)
+`Span`, you can use `StartSpanFromContext`.
+
+```go
+ func xyz(ctx context.Context, ...) {
+ ...
+ span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name")
+ defer span.Finish()
+ span.LogFields(
+ log.String("event", "soft error"),
+ log.String("type", "cache timeout"),
+ log.Int("waited.millis", 1500))
+ ...
+ }
+```
+
+#### Starting an empty trace by creating a "root span"
+
+It's always possible to create a "root" `Span` with no parent or other causal
+reference.
+
+```go
+ func xyz() {
+ ...
+ sp := opentracing.StartSpan("operation_name")
+ defer sp.Finish()
+ ...
+ }
+```
+
+#### Creating a (child) Span given an existing (parent) Span
+
+```go
+ func xyz(parentSpan opentracing.Span, ...) {
+ ...
+ sp := opentracing.StartSpan(
+ "operation_name",
+ opentracing.ChildOf(parentSpan.Context()))
+ defer sp.Finish()
+ ...
+ }
+```
+
+#### Serializing to the wire
+
+```go
+ func makeSomeRequest(ctx context.Context) ... {
+ if span := opentracing.SpanFromContext(ctx); span != nil {
+ httpClient := &http.Client{}
+ httpReq, _ := http.NewRequest("GET", "http://myservice/", nil)
+
+ // Transmit the span's TraceContext as HTTP headers on our
+ // outbound request.
+ opentracing.GlobalTracer().Inject(
+ span.Context(),
+ opentracing.HTTPHeaders,
+ opentracing.HTTPHeadersCarrier(httpReq.Header))
+
+ resp, err := httpClient.Do(httpReq)
+ ...
+ }
+ ...
+ }
+```
+
+#### Deserializing from the wire
+
+```go
+ http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+ var serverSpan opentracing.Span
+ appSpecificOperationName := ...
+ wireContext, err := opentracing.GlobalTracer().Extract(
+ opentracing.HTTPHeaders,
+ opentracing.HTTPHeadersCarrier(req.Header))
+ if err != nil {
+ // Optionally record something about err here
+ }
+
+ // Create the span referring to the RPC client if available.
+ // If wireContext == nil, a root span will be created.
+ serverSpan = opentracing.StartSpan(
+ appSpecificOperationName,
+ ext.RPCServerOption(wireContext))
+
+ defer serverSpan.Finish()
+
+ ctx := opentracing.ContextWithSpan(context.Background(), serverSpan)
+ ...
+ }
+```
+
+#### Conditionally capture a field using `log.Noop`
+
+In some situations, you may want to dynamically decide whether or not
+to log a field. For example, you may want to capture additional data,
+such as a customer ID, in non-production environments:
+
+```go
+ func Customer(order *Order) log.Field {
+ if os.Getenv("ENVIRONMENT") == "dev" {
+ return log.String("customer", order.Customer.ID)
+ }
+ return log.Noop()
+ }
+```
+
+#### Goroutine-safety
+
+The entire public API is goroutine-safe and does not require external
+synchronization.
+
+## API pointers for those implementing a tracing system
+
+Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`.
+
+## API compatibility
+
+For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority.
+
+## Tracer test suite
+
+A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can help Tracer implementors verify that their Tracer works correctly.
+
+## Licensing
+
+[Apache 2.0 License](./LICENSE).
diff --git a/vendor/github.com/opentracing/opentracing-go/ext.go b/vendor/github.com/opentracing/opentracing-go/ext.go
new file mode 100644
index 00000000..e11977eb
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/ext.go
@@ -0,0 +1,24 @@
+package opentracing
+
+import (
+ "context"
+)
+
+// TracerContextWithSpanExtension is an extension interface that a Tracer
+// implementation may want to implement. It allows the tracer some control
+// over the Go context when ContextWithSpan is invoked.
+//
+// The primary purpose of this extension is to support adapters from the
+// opentracing API to some other tracing API.
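+//
+// As an illustrative sketch (not part of the upstream docs; adapterTracer and
+// adapterSpanKey are hypothetical), an adapter tracer could implement the hook
+// to also register the span under its own context key:
+//
+//    func (t adapterTracer) ContextWithSpanHook(ctx context.Context, span Span) context.Context {
+//        return context.WithValue(ctx, adapterSpanKey{}, span)
+//    }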
+type TracerContextWithSpanExtension interface {
+	// ContextWithSpanHook gets called by the ContextWithSpan
+	// function when the Tracer implementation also implements
+	// this interface. It allows the tracer to put extra information
+	// into the context and make it available to the callers of
+	// ContextWithSpan.
+ //
+ // This hook is invoked before the ContextWithSpan function
+ // actually puts the span into the context.
+ ContextWithSpanHook(ctx context.Context, span Span) context.Context
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/ext/field.go b/vendor/github.com/opentracing/opentracing-go/ext/field.go
new file mode 100644
index 00000000..8282bd75
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/ext/field.go
@@ -0,0 +1,17 @@
+package ext
+
+import (
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/log"
+)
+
+// LogError sets the error=true tag on the Span and logs err as an "error" event.
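+//
+// Illustrative usage (resourceName is a hypothetical variable; extra fields are optional):
+//
+//    ext.LogError(span, err, log.String("resource", resourceName))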
+func LogError(span opentracing.Span, err error, fields ...log.Field) {
+ Error.Set(span, true)
+ ef := []log.Field{
+ log.Event("error"),
+ log.Error(err),
+ }
+ ef = append(ef, fields...)
+ span.LogFields(ef...)
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go
new file mode 100644
index 00000000..a414b595
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/ext/tags.go
@@ -0,0 +1,215 @@
+package ext
+
+import "github.com/opentracing/opentracing-go"
+
+// These constants define common tag names recommended for better portability across
+// tracing systems and languages/platforms.
+//
+// The tag names are defined as typed strings, so that in addition to the usual use
+//
+// span.setTag(TagName, value)
+//
+// they also support value type validation via this additional syntax:
+//
+// TagName.Set(span, value)
+//
+var (
+ //////////////////////////////////////////////////////////////////////
+ // SpanKind (client/server or producer/consumer)
+ //////////////////////////////////////////////////////////////////////
+
+ // SpanKind hints at relationship between spans, e.g. client/server
+ SpanKind = spanKindTagName("span.kind")
+
+ // SpanKindRPCClient marks a span representing the client-side of an RPC
+ // or other remote call
+ SpanKindRPCClientEnum = SpanKindEnum("client")
+ SpanKindRPCClient = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum}
+
+ // SpanKindRPCServer marks a span representing the server-side of an RPC
+ // or other remote call
+ SpanKindRPCServerEnum = SpanKindEnum("server")
+ SpanKindRPCServer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum}
+
+ // SpanKindProducer marks a span representing the producer-side of a
+ // message bus
+ SpanKindProducerEnum = SpanKindEnum("producer")
+ SpanKindProducer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum}
+
+ // SpanKindConsumer marks a span representing the consumer-side of a
+ // message bus
+ SpanKindConsumerEnum = SpanKindEnum("consumer")
+ SpanKindConsumer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum}
+
+ //////////////////////////////////////////////////////////////////////
+ // Component name
+ //////////////////////////////////////////////////////////////////////
+
+ // Component is a low-cardinality identifier of the module, library,
+ // or package that is generating a span.
+ Component = StringTagName("component")
+
+ //////////////////////////////////////////////////////////////////////
+ // Sampling hint
+ //////////////////////////////////////////////////////////////////////
+
+ // SamplingPriority determines the priority of sampling this Span.
+ SamplingPriority = Uint16TagName("sampling.priority")
+
+ //////////////////////////////////////////////////////////////////////
+ // Peer tags. These tags can be emitted by either client-side or
+ // server-side to describe the other side/service in a peer-to-peer
+ // communications, like an RPC call.
+ //////////////////////////////////////////////////////////////////////
+
+ // PeerService records the service name of the peer.
+ PeerService = StringTagName("peer.service")
+
+	// PeerAddress records the address of the peer. This may be an "ip:port",
+	// a bare "hostname", an FQDN, or even a database DSN substring
+	// like "mysql://username@127.0.0.1:3306/dbname"
+ PeerAddress = StringTagName("peer.address")
+
+ // PeerHostname records the host name of the peer
+ PeerHostname = StringTagName("peer.hostname")
+
+ // PeerHostIPv4 records IP v4 host address of the peer
+ PeerHostIPv4 = IPv4TagName("peer.ipv4")
+
+ // PeerHostIPv6 records IP v6 host address of the peer
+ PeerHostIPv6 = StringTagName("peer.ipv6")
+
+ // PeerPort records port number of the peer
+ PeerPort = Uint16TagName("peer.port")
+
+ //////////////////////////////////////////////////////////////////////
+ // HTTP Tags
+ //////////////////////////////////////////////////////////////////////
+
+ // HTTPUrl should be the URL of the request being handled in this segment
+ // of the trace, in standard URI format. The protocol is optional.
+ HTTPUrl = StringTagName("http.url")
+
+ // HTTPMethod is the HTTP method of the request, and is case-insensitive.
+ HTTPMethod = StringTagName("http.method")
+
+ // HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the
+ // HTTP response.
+ HTTPStatusCode = Uint16TagName("http.status_code")
+
+ //////////////////////////////////////////////////////////////////////
+ // DB Tags
+ //////////////////////////////////////////////////////////////////////
+
+ // DBInstance is database instance name.
+ DBInstance = StringTagName("db.instance")
+
+ // DBStatement is a database statement for the given database type.
+ // It can be a query or a prepared statement (i.e., before substitution).
+ DBStatement = StringTagName("db.statement")
+
+ // DBType is a database type. For any SQL database, "sql".
+ // For others, the lower-case database category, e.g. "redis"
+ DBType = StringTagName("db.type")
+
+ // DBUser is a username for accessing database.
+ DBUser = StringTagName("db.user")
+
+ //////////////////////////////////////////////////////////////////////
+ // Message Bus Tag
+ //////////////////////////////////////////////////////////////////////
+
+ // MessageBusDestination is an address at which messages can be exchanged
+ MessageBusDestination = StringTagName("message_bus.destination")
+
+ //////////////////////////////////////////////////////////////////////
+ // Error Tag
+ //////////////////////////////////////////////////////////////////////
+
+	// Error indicates that the operation represented by the span resulted in an error.
+ Error = BoolTagName("error")
+)
+
+// ---
+
+// SpanKindEnum represents common span types
+type SpanKindEnum string
+
+type spanKindTagName string
+
+// Set adds a string tag to the `span`
+func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) {
+ span.SetTag(string(tag), value)
+}
+
+type rpcServerOption struct {
+ clientContext opentracing.SpanContext
+}
+
+func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) {
+ if r.clientContext != nil {
+ opentracing.ChildOf(r.clientContext).Apply(o)
+ }
+ SpanKindRPCServer.Apply(o)
+}
+
+// RPCServerOption returns a StartSpanOption appropriate for an RPC server span
+// with `client` representing the metadata for the remote peer Span if available.
+// In case client == nil, due to the client not being instrumented, this RPC
+// server span will be a root span.
+func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption {
+ return rpcServerOption{client}
+}
+
+// ---
+
+// StringTagName is a common tag name to be set to a string value
+type StringTagName string
+
+// Set adds a string tag to the `span`
+func (tag StringTagName) Set(span opentracing.Span, value string) {
+ span.SetTag(string(tag), value)
+}
+
+// ---
+
+// Uint32TagName is a common tag name to be set to a uint32 value
+type Uint32TagName string
+
+// Set adds a uint32 tag to the `span`
+func (tag Uint32TagName) Set(span opentracing.Span, value uint32) {
+ span.SetTag(string(tag), value)
+}
+
+// ---
+
+// Uint16TagName is a common tag name to be set to a uint16 value
+type Uint16TagName string
+
+// Set adds a uint16 tag to the `span`
+func (tag Uint16TagName) Set(span opentracing.Span, value uint16) {
+ span.SetTag(string(tag), value)
+}
+
+// ---
+
+// BoolTagName is a common tag name to be set to a bool value
+type BoolTagName string
+
+// Set adds a bool tag to the `span`
+func (tag BoolTagName) Set(span opentracing.Span, value bool) {
+ span.SetTag(string(tag), value)
+}
+
+// IPv4TagName is a common tag name to be set to an ipv4 value
+type IPv4TagName string
+
+// Set adds the IPv4 host address of the peer as a uint32 value to the `span`; this form is kept for backward and Zipkin compatibility
+func (tag IPv4TagName) Set(span opentracing.Span, value uint32) {
+ span.SetTag(string(tag), value)
+}
+
+// SetString records the IPv4 host address of the peer as a dot-separated string on the `span`, e.g. "127.0.0.1"
+func (tag IPv4TagName) SetString(span opentracing.Span, value string) {
+ span.SetTag(string(tag), value)
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go
new file mode 100644
index 00000000..4f7066a9
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/globaltracer.go
@@ -0,0 +1,42 @@
+package opentracing
+
+type registeredTracer struct {
+ tracer Tracer
+ isRegistered bool
+}
+
+var (
+ globalTracer = registeredTracer{NoopTracer{}, false}
+)
+
+// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by
+// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an
+// opentracing.Tracer instance) should call SetGlobalTracer as early as
+// possible in main(), prior to calling the `StartSpan` global func below.
+// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan`
+// (etc) globals are noops.
+func SetGlobalTracer(tracer Tracer) {
+ globalTracer = registeredTracer{tracer, true}
+}
+
+// GlobalTracer returns the global singleton `Tracer` implementation.
+// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop
+// implementation that drops all data handed to it.
+func GlobalTracer() Tracer {
+ return globalTracer.tracer
+}
+
+// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`.
+func StartSpan(operationName string, opts ...StartSpanOption) Span {
+ return globalTracer.tracer.StartSpan(operationName, opts...)
+}
+
+// InitGlobalTracer is deprecated. Please use SetGlobalTracer.
+func InitGlobalTracer(tracer Tracer) {
+ SetGlobalTracer(tracer)
+}
+
+// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered
+func IsGlobalTracerRegistered() bool {
+ return globalTracer.isRegistered
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go
new file mode 100644
index 00000000..1831bc9b
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/gocontext.go
@@ -0,0 +1,65 @@
+package opentracing
+
+import "context"
+
+type contextKey struct{}
+
+var activeSpanKey = contextKey{}
+
+// ContextWithSpan returns a new `context.Context` that holds a reference to
+// the span. If span is nil, a new context without an active span is returned.
+func ContextWithSpan(ctx context.Context, span Span) context.Context {
+ if span != nil {
+ if tracerWithHook, ok := span.Tracer().(TracerContextWithSpanExtension); ok {
+ ctx = tracerWithHook.ContextWithSpanHook(ctx, span)
+ }
+ }
+ return context.WithValue(ctx, activeSpanKey, span)
+}
+
+// SpanFromContext returns the `Span` previously associated with `ctx`, or
+// `nil` if no such `Span` could be found.
+//
+// NOTE: context.Context != SpanContext: the former is Go's intra-process
+// context propagation mechanism, and the latter houses OpenTracing's per-Span
+// identity and baggage information.
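+//
+// Illustrative usage:
+//
+//    if sp := opentracing.SpanFromContext(ctx); sp != nil {
+//        sp.SetTag("component", "billing")
+//    }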
+func SpanFromContext(ctx context.Context) Span {
+ val := ctx.Value(activeSpanKey)
+ if sp, ok := val.(Span); ok {
+ return sp
+ }
+ return nil
+}
+
+// StartSpanFromContext starts and returns a Span with `operationName`, using
+// any Span found within `ctx` as a ChildOfRef. If no such parent could be
+// found, StartSpanFromContext creates a root (parentless) Span.
+//
+// The second return value is a context.Context object built around the
+// returned Span.
+//
+// Example usage:
+//
+// SomeFunction(ctx context.Context, ...) {
+// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction")
+// defer sp.Finish()
+// ...
+// }
+func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) {
+ return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...)
+}
+
+// StartSpanFromContextWithTracer starts and returns a span with `operationName`
+// using a span found within the context as a ChildOfRef. If that doesn't exist
+// it creates a root span. It also returns a context.Context object built
+// around the returned span.
+//
+// Its behavior is identical to StartSpanFromContext except that it takes an explicit
+// tracer as opposed to using the global tracer.
+func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) {
+ if parentSpan := SpanFromContext(ctx); parentSpan != nil {
+ opts = append(opts, ChildOf(parentSpan.Context()))
+ }
+ span := tracer.StartSpan(operationName, opts...)
+ return span, ContextWithSpan(ctx, span)
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go
new file mode 100644
index 00000000..f222ded7
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/log/field.go
@@ -0,0 +1,282 @@
+package log
+
+import (
+ "fmt"
+ "math"
+)
+
+type fieldType int
+
+const (
+ stringType fieldType = iota
+ boolType
+ intType
+ int32Type
+ uint32Type
+ int64Type
+ uint64Type
+ float32Type
+ float64Type
+ errorType
+ objectType
+ lazyLoggerType
+ noopType
+)
+
+// Field instances are constructed via LogBool, LogString, and so on.
+// Tracing implementations may then handle them via the Field.Marshal
+// method.
+//
+// "heavily influenced by" (i.e., partially stolen from)
+// https://github.com/uber-go/zap
+type Field struct {
+ key string
+ fieldType fieldType
+ numericVal int64
+ stringVal string
+ interfaceVal interface{}
+}
+
+// String adds a string-valued key:value pair to a Span.LogFields() record
+func String(key, val string) Field {
+ return Field{
+ key: key,
+ fieldType: stringType,
+ stringVal: val,
+ }
+}
+
+// Bool adds a bool-valued key:value pair to a Span.LogFields() record
+func Bool(key string, val bool) Field {
+ var numericVal int64
+ if val {
+ numericVal = 1
+ }
+ return Field{
+ key: key,
+ fieldType: boolType,
+ numericVal: numericVal,
+ }
+}
+
+// Int adds an int-valued key:value pair to a Span.LogFields() record
+func Int(key string, val int) Field {
+ return Field{
+ key: key,
+ fieldType: intType,
+ numericVal: int64(val),
+ }
+}
+
+// Int32 adds an int32-valued key:value pair to a Span.LogFields() record
+func Int32(key string, val int32) Field {
+ return Field{
+ key: key,
+ fieldType: int32Type,
+ numericVal: int64(val),
+ }
+}
+
+// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
+func Int64(key string, val int64) Field {
+ return Field{
+ key: key,
+ fieldType: int64Type,
+ numericVal: val,
+ }
+}
+
+// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record
+func Uint32(key string, val uint32) Field {
+ return Field{
+ key: key,
+ fieldType: uint32Type,
+ numericVal: int64(val),
+ }
+}
+
+// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record
+func Uint64(key string, val uint64) Field {
+ return Field{
+ key: key,
+ fieldType: uint64Type,
+ numericVal: int64(val),
+ }
+}
+
+// Float32 adds a float32-valued key:value pair to a Span.LogFields() record
+func Float32(key string, val float32) Field {
+ return Field{
+ key: key,
+ fieldType: float32Type,
+ numericVal: int64(math.Float32bits(val)),
+ }
+}
+
+// Float64 adds a float64-valued key:value pair to a Span.LogFields() record
+func Float64(key string, val float64) Field {
+ return Field{
+ key: key,
+ fieldType: float64Type,
+ numericVal: int64(math.Float64bits(val)),
+ }
+}
+
+// Error adds an error with the key "error.object" to a Span.LogFields() record
+func Error(err error) Field {
+ return Field{
+ key: "error.object",
+ fieldType: errorType,
+ interfaceVal: err,
+ }
+}
+
+// Object adds an object-valued key:value pair to a Span.LogFields() record.
+// Please pass in an immutable object, otherwise there may be concurrency issues:
+// for example, passing in a map that is later mutated can cause a
+// "fatal error: concurrent map iteration and map write", because the span is
+// sent asynchronously and the map may still be read while it is being modified.
+func Object(key string, obj interface{}) Field {
+ return Field{
+ key: key,
+ fieldType: objectType,
+ interfaceVal: obj,
+ }
+}
+
+// Event creates a string-valued Field for span logs with key="event" and value=val.
+func Event(val string) Field {
+ return String("event", val)
+}
+
+// Message creates a string-valued Field for span logs with key="message" and value=val.
+func Message(val string) Field {
+ return String("message", val)
+}
+
+// LazyLogger allows for user-defined, late-bound logging of arbitrary data
+type LazyLogger func(fv Encoder)
+
+// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing
+// implementation will call the LazyLogger function at an indefinite time in
+// the future (after Lazy() returns).
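+//
+// Illustrative usage, deferring an expensive computation until (and unless)
+// the tracer actually encodes the field (computeExpensiveValue is hypothetical):
+//
+//    span.LogFields(log.Lazy(func(fv log.Encoder) {
+//        fv.EmitString("expensive", computeExpensiveValue())
+//    }))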
+func Lazy(ll LazyLogger) Field {
+ return Field{
+ fieldType: lazyLoggerType,
+ interfaceVal: ll,
+ }
+}
+
+// Noop creates a no-op log field that should be ignored by the tracer.
+// It can be used to capture optional fields, for example those that should
+// only be logged in non-production environments:
+//
+// func customerField(order *Order) log.Field {
+// if os.Getenv("ENVIRONMENT") == "dev" {
+// return log.String("customer", order.Customer.ID)
+// }
+// return log.Noop()
+// }
+//
+// span.LogFields(log.String("event", "purchase"), customerField(order))
+//
+func Noop() Field {
+ return Field{
+ fieldType: noopType,
+ }
+}
+
+// Encoder allows access to the contents of a Field (via a call to
+// Field.Marshal).
+//
+// Tracer implementations typically provide an implementation of Encoder;
+// OpenTracing callers typically do not need to concern themselves with it.
+type Encoder interface {
+ EmitString(key, value string)
+ EmitBool(key string, value bool)
+ EmitInt(key string, value int)
+ EmitInt32(key string, value int32)
+ EmitInt64(key string, value int64)
+ EmitUint32(key string, value uint32)
+ EmitUint64(key string, value uint64)
+ EmitFloat32(key string, value float32)
+ EmitFloat64(key string, value float64)
+ EmitObject(key string, value interface{})
+ EmitLazyLogger(value LazyLogger)
+}
+
+// Marshal passes a Field instance through to the appropriate
+// field-type-specific method of an Encoder.
+func (lf Field) Marshal(visitor Encoder) {
+ switch lf.fieldType {
+ case stringType:
+ visitor.EmitString(lf.key, lf.stringVal)
+ case boolType:
+ visitor.EmitBool(lf.key, lf.numericVal != 0)
+ case intType:
+ visitor.EmitInt(lf.key, int(lf.numericVal))
+ case int32Type:
+ visitor.EmitInt32(lf.key, int32(lf.numericVal))
+ case int64Type:
+ visitor.EmitInt64(lf.key, int64(lf.numericVal))
+ case uint32Type:
+ visitor.EmitUint32(lf.key, uint32(lf.numericVal))
+ case uint64Type:
+ visitor.EmitUint64(lf.key, uint64(lf.numericVal))
+ case float32Type:
+ visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal)))
+ case float64Type:
+ visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal)))
+ case errorType:
+ if err, ok := lf.interfaceVal.(error); ok {
+ visitor.EmitString(lf.key, err.Error())
+ } else {
+ visitor.EmitString(lf.key, "<nil>")
+ }
+ case objectType:
+ visitor.EmitObject(lf.key, lf.interfaceVal)
+ case lazyLoggerType:
+ visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger))
+ case noopType:
+ // intentionally left blank
+ }
+}
+
+// Key returns the field's key.
+func (lf Field) Key() string {
+ return lf.key
+}
+
+// Value returns the field's value as interface{}.
+func (lf Field) Value() interface{} {
+ switch lf.fieldType {
+ case stringType:
+ return lf.stringVal
+ case boolType:
+ return lf.numericVal != 0
+ case intType:
+ return int(lf.numericVal)
+ case int32Type:
+ return int32(lf.numericVal)
+ case int64Type:
+ return int64(lf.numericVal)
+ case uint32Type:
+ return uint32(lf.numericVal)
+ case uint64Type:
+ return uint64(lf.numericVal)
+ case float32Type:
+ return math.Float32frombits(uint32(lf.numericVal))
+ case float64Type:
+ return math.Float64frombits(uint64(lf.numericVal))
+ case errorType, objectType, lazyLoggerType:
+ return lf.interfaceVal
+ case noopType:
+ return nil
+ default:
+ return nil
+ }
+}
+
+// String returns a string representation of the key and value.
+func (lf Field) String() string {
+ return fmt.Sprint(lf.key, ":", lf.Value())
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go
new file mode 100644
index 00000000..d57e28aa
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/log/util.go
@@ -0,0 +1,61 @@
+package log
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice
+// a la Span.LogFields().
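+//
+// Illustrative usage:
+//
+//    fields, err := log.InterleavedKVToFields("event", "soft error", "waited.millis", 1500)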
+func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) {
+ if len(keyValues)%2 != 0 {
+ return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues))
+ }
+ fields := make([]Field, len(keyValues)/2)
+ for i := 0; i*2 < len(keyValues); i++ {
+ key, ok := keyValues[i*2].(string)
+ if !ok {
+ return nil, fmt.Errorf(
+ "non-string key (pair #%d): %T",
+ i, keyValues[i*2])
+ }
+ switch typedVal := keyValues[i*2+1].(type) {
+ case bool:
+ fields[i] = Bool(key, typedVal)
+ case string:
+ fields[i] = String(key, typedVal)
+ case int:
+ fields[i] = Int(key, typedVal)
+ case int8:
+ fields[i] = Int32(key, int32(typedVal))
+ case int16:
+ fields[i] = Int32(key, int32(typedVal))
+ case int32:
+ fields[i] = Int32(key, typedVal)
+ case int64:
+ fields[i] = Int64(key, typedVal)
+ case uint:
+ fields[i] = Uint64(key, uint64(typedVal))
+ case uint64:
+ fields[i] = Uint64(key, typedVal)
+ case uint8:
+ fields[i] = Uint32(key, uint32(typedVal))
+ case uint16:
+ fields[i] = Uint32(key, uint32(typedVal))
+ case uint32:
+ fields[i] = Uint32(key, typedVal)
+ case float32:
+ fields[i] = Float32(key, typedVal)
+ case float64:
+ fields[i] = Float64(key, typedVal)
+ default:
+ if typedVal == nil || (reflect.ValueOf(typedVal).Kind() == reflect.Ptr && reflect.ValueOf(typedVal).IsNil()) {
+ fields[i] = String(key, "nil")
+ continue
+ }
+ // When in doubt, coerce to a string
+ fields[i] = String(key, fmt.Sprint(typedVal))
+ }
+ }
+ return fields, nil
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go
new file mode 100644
index 00000000..f9b680a2
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/noop.go
@@ -0,0 +1,64 @@
+package opentracing
+
+import "github.com/opentracing/opentracing-go/log"
+
+// A NoopTracer is a trivial, minimum overhead implementation of Tracer
+// for which all operations are no-ops.
+//
+// The primary use of this implementation is in libraries, such as RPC
+// frameworks, that make tracing an optional feature controlled by the
+// end user. A no-op implementation allows said libraries to use it
+// as the default Tracer and to write instrumentation that does
+// not need to keep checking if the tracer instance is nil.
+//
+// For the same reason, the NoopTracer is the default "global" tracer
+// (see GlobalTracer and SetGlobalTracer functions).
+//
+// WARNING: NoopTracer does not support baggage propagation.
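+//
+// As an illustrative sketch (Client is hypothetical), a library can accept an
+// optional tracer and fall back to the no-op implementation:
+//
+//    func NewClient(t opentracing.Tracer) *Client {
+//        if t == nil {
+//            t = opentracing.NoopTracer{}
+//        }
+//        return &Client{tracer: t}
+//    }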
+type NoopTracer struct{}
+
+type noopSpan struct{}
+type noopSpanContext struct{}
+
+var (
+ defaultNoopSpanContext SpanContext = noopSpanContext{}
+ defaultNoopSpan Span = noopSpan{}
+ defaultNoopTracer Tracer = NoopTracer{}
+)
+
+const (
+ emptyString = ""
+)
+
+// noopSpanContext:
+func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {}
+
+// noopSpan:
+func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext }
+func (n noopSpan) SetBaggageItem(key, val string) Span { return n }
+func (n noopSpan) BaggageItem(key string) string { return emptyString }
+func (n noopSpan) SetTag(key string, value interface{}) Span { return n }
+func (n noopSpan) LogFields(fields ...log.Field) {}
+func (n noopSpan) LogKV(keyVals ...interface{}) {}
+func (n noopSpan) Finish() {}
+func (n noopSpan) FinishWithOptions(opts FinishOptions) {}
+func (n noopSpan) SetOperationName(operationName string) Span { return n }
+func (n noopSpan) Tracer() Tracer { return defaultNoopTracer }
+func (n noopSpan) LogEvent(event string) {}
+func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {}
+func (n noopSpan) Log(data LogData) {}
+
+// StartSpan belongs to the Tracer interface.
+func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span {
+ return defaultNoopSpan
+}
+
+// Inject belongs to the Tracer interface.
+func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error {
+ return nil
+}
+
+// Extract belongs to the Tracer interface.
+func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) {
+ return nil, ErrSpanContextNotFound
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go
new file mode 100644
index 00000000..b0c275eb
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/propagation.go
@@ -0,0 +1,176 @@
+package opentracing
+
+import (
+ "errors"
+ "net/http"
+)
+
+///////////////////////////////////////////////////////////////////////////////
+// CORE PROPAGATION INTERFACES:
+///////////////////////////////////////////////////////////////////////////////
+
+var (
+ // ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or
+ // Tracer.Extract() is not recognized by the Tracer implementation.
+ ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format")
+
+ // ErrSpanContextNotFound occurs when the `carrier` passed to
+ // Tracer.Extract() is valid and uncorrupted but has insufficient
+ // information to extract a SpanContext.
+ ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier")
+
+ // ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to
+ // operate on a SpanContext which it is not prepared to handle (for
+ // example, since it was created by a different tracer implementation).
+ ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer")
+
+ // ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract()
+ // implementations expect a different type of `carrier` than they are
+ // given.
+ ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier")
+
+ // ErrSpanContextCorrupted occurs when the `carrier` passed to
+ // Tracer.Extract() is of the expected type but is corrupted.
+ ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier")
+)
+
+///////////////////////////////////////////////////////////////////////////////
+// BUILTIN PROPAGATION FORMATS:
+///////////////////////////////////////////////////////////////////////////////
+
+// BuiltinFormat is used to demarcate the values within package `opentracing`
+// that are intended for use with the Tracer.Inject() and Tracer.Extract()
+// methods.
+type BuiltinFormat byte
+
+const (
+ // Binary represents SpanContexts as opaque binary data.
+ //
+ // For Tracer.Inject(): the carrier must be an `io.Writer`.
+ //
+ // For Tracer.Extract(): the carrier must be an `io.Reader`.
+ Binary BuiltinFormat = iota
+
+ // TextMap represents SpanContexts as key:value string pairs.
+ //
+ // Unlike HTTPHeaders, the TextMap format does not restrict the key or
+ // value character sets in any way.
+ //
+ // For Tracer.Inject(): the carrier must be a `TextMapWriter`.
+ //
+ // For Tracer.Extract(): the carrier must be a `TextMapReader`.
+ TextMap
+
+ // HTTPHeaders represents SpanContexts as HTTP header string pairs.
+ //
+ // Unlike TextMap, the HTTPHeaders format requires that the keys and values
+ // be valid as HTTP headers as-is (i.e., character casing may be unstable
+ // and special characters are disallowed in keys, values should be
+ // URL-escaped, etc).
+ //
+ // For Tracer.Inject(): the carrier must be a `TextMapWriter`.
+ //
+ // For Tracer.Extract(): the carrier must be a `TextMapReader`.
+ //
+ // See HTTPHeadersCarrier for an implementation of both TextMapWriter
+ // and TextMapReader that defers to an http.Header instance for storage.
+ // For example, Inject():
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // err := span.Tracer().Inject(
+ // span.Context(), opentracing.HTTPHeaders, carrier)
+ //
+ // Or Extract():
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // clientContext, err := tracer.Extract(
+ // opentracing.HTTPHeaders, carrier)
+ //
+ HTTPHeaders
+)
+
+// TextMapWriter is the Inject() carrier for the TextMap builtin format. With
+// it, the caller can encode a SpanContext for propagation as entries in a map
+// of unicode strings.
+type TextMapWriter interface {
+	// Set sets a key:value pair on the carrier. Multiple calls to Set() for the
+	// same key lead to undefined behavior.
+ //
+ // NOTE: The backing store for the TextMapWriter may contain data unrelated
+ // to SpanContext. As such, Inject() and Extract() implementations that
+ // call the TextMapWriter and TextMapReader interfaces must agree on a
+ // prefix or other convention to distinguish their own key:value pairs.
+ Set(key, val string)
+}
+
+// TextMapReader is the Extract() carrier for the TextMap builtin format. With it,
+// the caller can decode a propagated SpanContext as entries in a map of
+// unicode strings.
+type TextMapReader interface {
+ // ForeachKey returns TextMap contents via repeated calls to the `handler`
+ // function. If any call to `handler` returns a non-nil error, ForeachKey
+ // terminates and returns that error.
+ //
+ // NOTE: The backing store for the TextMapReader may contain data unrelated
+ // to SpanContext. As such, Inject() and Extract() implementations that
+ // call the TextMapWriter and TextMapReader interfaces must agree on a
+ // prefix or other convention to distinguish their own key:value pairs.
+ //
+ // The "foreach" callback pattern reduces unnecessary copying in some cases
+ // and also allows implementations to hold locks while the map is read.
+ ForeachKey(handler func(key, val string) error) error
+}
+
+// TextMapCarrier allows the use of regular map[string]string
+// as both TextMapWriter and TextMapReader.
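+//
+// Illustrative usage:
+//
+//    carrier := opentracing.TextMapCarrier{}
+//    err := tracer.Inject(span.Context(), opentracing.TextMap, carrier)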
+type TextMapCarrier map[string]string
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error {
+ for k, v := range c {
+ if err := handler(k, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Set implements Set() of opentracing.TextMapWriter
+func (c TextMapCarrier) Set(key, val string) {
+ c[key] = val
+}
+
+// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader.
+//
+// Example usage for server side:
+//
+// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
+//
+// Example usage for client side:
+//
+// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+// err := tracer.Inject(
+// span.Context(),
+// opentracing.HTTPHeaders,
+// carrier)
+//
+type HTTPHeadersCarrier http.Header
+
+// Set conforms to the TextMapWriter interface.
+func (c HTTPHeadersCarrier) Set(key, val string) {
+ h := http.Header(c)
+ h.Set(key, val)
+}
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error {
+ for k, vals := range c {
+ for _, v := range vals {
+ if err := handler(k, v); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go
new file mode 100644
index 00000000..0d3fb534
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/span.go
@@ -0,0 +1,189 @@
+package opentracing
+
+import (
+ "time"
+
+ "github.com/opentracing/opentracing-go/log"
+)
+
+// SpanContext represents Span state that must propagate to descendant Spans and across process
+// boundaries (e.g., a <trace_id, span_id, sampled> tuple).
+type SpanContext interface {
+ // ForeachBaggageItem grants access to all baggage items stored in the
+ // SpanContext.
+ // The handler function will be called for each baggage key/value pair.
+ // The ordering of items is not guaranteed.
+ //
+ // The bool return value indicates if the handler wants to continue iterating
+ // through the rest of the baggage items; for example if the handler is trying to
+ // find some baggage item by pattern matching the name, it can return false
+ // as soon as the item is found to stop further iterations.
+ ForeachBaggageItem(handler func(k, v string) bool)
+}
+
+// Span represents an active, un-finished span in the OpenTracing system.
+//
+// Spans are created by the Tracer interface.
+type Span interface {
+ // Sets the end timestamp and finalizes Span state.
+ //
+ // With the exception of calls to Context() (which are always allowed),
+ // Finish() must be the last call made to any span instance, and to do
+ // otherwise leads to undefined behavior.
+ Finish()
+ // FinishWithOptions is like Finish() but with explicit control over
+ // timestamps and log data.
+ FinishWithOptions(opts FinishOptions)
+
+ // Context() yields the SpanContext for this Span. Note that the return
+ // value of Context() is still valid after a call to Span.Finish(), as is
+ // a call to Span.Context() after a call to Span.Finish().
+ Context() SpanContext
+
+ // Sets or changes the operation name.
+ //
+ // Returns a reference to this Span for chaining.
+ SetOperationName(operationName string) Span
+
+ // Adds a tag to the span.
+ //
+ // If there is a pre-existing tag set for `key`, it is overwritten.
+ //
+ // Tag values can be numeric types, strings, or bools. The behavior of
+ // other tag value types is undefined at the OpenTracing level. If a
+ // tracing system does not know how to handle a particular value type, it
+ // may ignore the tag, but shall not panic.
+ //
+ // Returns a reference to this Span for chaining.
+ SetTag(key string, value interface{}) Span
+
+ // LogFields is an efficient and type-checked way to record key:value
+ // logging data about a Span, though the programming interface is a little
+ // more verbose than LogKV(). Here's an example:
+ //
+ // span.LogFields(
+ // log.String("event", "soft error"),
+ // log.String("type", "cache timeout"),
+ // log.Int("waited.millis", 1500))
+ //
+ // Also see Span.FinishWithOptions() and FinishOptions.BulkLogData.
+ LogFields(fields ...log.Field)
+
+ // LogKV is a concise, readable way to record key:value logging data about
+ // a Span, though unfortunately this also makes it less efficient and less
+ // type-safe than LogFields(). Here's an example:
+ //
+ // span.LogKV(
+ // "event", "soft error",
+ // "type", "cache timeout",
+ // "waited.millis", 1500)
+ //
+ // For LogKV (as opposed to LogFields()), the parameters must appear as
+ // key-value pairs, like
+ //
+ // span.LogKV(key1, val1, key2, val2, key3, val3, ...)
+ //
+ // The keys must all be strings. The values may be strings, numeric types,
+ // bools, Go error instances, or arbitrary structs.
+ //
+ // (Note to implementors: consider the log.InterleavedKVToFields() helper)
+ LogKV(alternatingKeyValues ...interface{})
+
+ // SetBaggageItem sets a key:value pair on this Span and its SpanContext
+ // that also propagates to descendants of this Span.
+ //
+	// SetBaggageItem() enables powerful functionality given a full-stack
+	// opentracing integration (e.g., arbitrary application data from a mobile
+	// app can make it, transparently, all the way into the depths of a storage
+	// system), but it also carries significant costs: use this feature with care.
+ //
+ // IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to
+ // *future* causal descendants of the associated Span.
+ //
+ // IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and
+ // value is copied into every local *and remote* child of the associated
+ // Span, and that can add up to a lot of network and cpu overhead.
+ //
+ // Returns a reference to this Span for chaining.
+ SetBaggageItem(restrictedKey, value string) Span
+
+ // Gets the value for a baggage item given its key. Returns the empty string
+ // if the value isn't found in this Span.
+ BaggageItem(restrictedKey string) string
+
+ // Provides access to the Tracer that created this Span.
+ Tracer() Tracer
+
+ // Deprecated: use LogFields or LogKV
+ LogEvent(event string)
+ // Deprecated: use LogFields or LogKV
+ LogEventWithPayload(event string, payload interface{})
+ // Deprecated: use LogFields or LogKV
+ Log(data LogData)
+}
+
+// LogRecord is data associated with a single Span log. Every LogRecord
+// instance must specify at least one Field.
+type LogRecord struct {
+ Timestamp time.Time
+ Fields []log.Field
+}
+
+// FinishOptions allows Span.FinishWithOptions callers to override the finish
+// timestamp and provide log data via a bulk interface.
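+//
+// Illustrative usage (completedAt is a hypothetical time.Time value):
+//
+//    span.FinishWithOptions(opentracing.FinishOptions{
+//        FinishTime: completedAt,
+//    })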
+type FinishOptions struct {
+ // FinishTime overrides the Span's finish time, or implicitly becomes
+ // time.Now() if FinishTime.IsZero().
+ //
+ // FinishTime must resolve to a timestamp that's >= the Span's StartTime
+ // (per StartSpanOptions).
+ FinishTime time.Time
+
+ // LogRecords allows the caller to specify the contents of many LogFields()
+ // calls with a single slice. May be nil.
+ //
+ // None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must
+ // be set explicitly). Also, they must be >= the Span's start timestamp and
+ // <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the
+ // behavior of FinishWithOptions() is undefined.
+ //
+ // If specified, the caller hands off ownership of LogRecords at
+ // FinishWithOptions() invocation time.
+ //
+ // If specified, the (deprecated) BulkLogData must be nil or empty.
+ LogRecords []LogRecord
+
+ // BulkLogData is DEPRECATED.
+ BulkLogData []LogData
+}
+
+// LogData is DEPRECATED
+type LogData struct {
+ Timestamp time.Time
+ Event string
+ Payload interface{}
+}
+
+// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord
+func (ld *LogData) ToLogRecord() LogRecord {
+ var literalTimestamp time.Time
+ if ld.Timestamp.IsZero() {
+ literalTimestamp = time.Now()
+ } else {
+ literalTimestamp = ld.Timestamp
+ }
+ rval := LogRecord{
+ Timestamp: literalTimestamp,
+ }
+ if ld.Payload == nil {
+ rval.Fields = []log.Field{
+ log.String("event", ld.Event),
+ }
+ } else {
+ rval.Fields = []log.Field{
+ log.String("event", ld.Event),
+ log.Object("payload", ld.Payload),
+ }
+ }
+ return rval
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go
new file mode 100644
index 00000000..715f0ced
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/tracer.go
@@ -0,0 +1,304 @@
+package opentracing
+
+import "time"
+
+// Tracer is a simple, thin interface for Span creation and SpanContext
+// propagation.
+type Tracer interface {
+
+ // Create, start, and return a new Span with the given `operationName` and
+ // incorporate the given StartSpanOption `opts`. (Note that `opts` borrows
+ // from the "functional options" pattern, per
+ // http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis)
+ //
+ // A Span with no SpanReference options (e.g., opentracing.ChildOf() or
+ // opentracing.FollowsFrom()) becomes the root of its own trace.
+ //
+ // Examples:
+ //
+ // var tracer opentracing.Tracer = ...
+ //
+ // // The root-span case:
+ // sp := tracer.StartSpan("GetFeed")
+ //
+ // // The vanilla child span case:
+ // sp := tracer.StartSpan(
+ // "GetFeed",
+ // opentracing.ChildOf(parentSpan.Context()))
+ //
+ // // All the bells and whistles:
+ // sp := tracer.StartSpan(
+ // "GetFeed",
+ // opentracing.ChildOf(parentSpan.Context()),
+ // opentracing.Tag{"user_agent", loggedReq.UserAgent},
+ // opentracing.StartTime(loggedReq.Timestamp),
+ // )
+ //
+ StartSpan(operationName string, opts ...StartSpanOption) Span
+
+ // Inject() takes the `sm` SpanContext instance and injects it for
+ // propagation within `carrier`. The actual type of `carrier` depends on
+ // the value of `format`.
+ //
+ // OpenTracing defines a common set of `format` values (see BuiltinFormat),
+ // and each has an expected carrier type.
+ //
+ // Other packages may declare their own `format` values, much like the keys
+ // used by `context.Context` (see https://godoc.org/context#WithValue).
+ //
+ // Example usage (sans error handling):
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // err := tracer.Inject(
+ // span.Context(),
+ // opentracing.HTTPHeaders,
+ // carrier)
+ //
+ // NOTE: All opentracing.Tracer implementations MUST support all
+ // BuiltinFormats.
+ //
+ // Implementations may return opentracing.ErrUnsupportedFormat if `format`
+ // is not supported by (or not known by) the implementation.
+ //
+ // Implementations may return opentracing.ErrInvalidCarrier or any other
+ // implementation-specific error if the format is supported but injection
+ // fails anyway.
+ //
+ // See Tracer.Extract().
+ Inject(sm SpanContext, format interface{}, carrier interface{}) error
+
+ // Extract() returns a SpanContext instance given `format` and `carrier`.
+ //
+ // OpenTracing defines a common set of `format` values (see BuiltinFormat),
+ // and each has an expected carrier type.
+ //
+ // Other packages may declare their own `format` values, much like the keys
+ // used by `context.Context` (see
+ // https://godoc.org/golang.org/x/net/context#WithValue).
+ //
+ // Example usage (with StartSpan):
+ //
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
+ //
+ // // ... assuming the ultimate goal here is to resume the trace with a
+ // // server-side Span:
+ // var serverSpan opentracing.Span
+ // if err == nil {
+ // span = tracer.StartSpan(
+ // rpcMethodName, ext.RPCServerOption(clientContext))
+ // } else {
+ // span = tracer.StartSpan(rpcMethodName)
+ // }
+ //
+ //
+ // NOTE: All opentracing.Tracer implementations MUST support all
+ // BuiltinFormats.
+ //
+ // Return values:
+ // - A successful Extract returns a SpanContext instance and a nil error
+ // - If there was simply no SpanContext to extract in `carrier`, Extract()
+ // returns (nil, opentracing.ErrSpanContextNotFound)
+ // - If `format` is unsupported or unrecognized, Extract() returns (nil,
+ // opentracing.ErrUnsupportedFormat)
+ // - If there are more fundamental problems with the `carrier` object,
+ // Extract() may return opentracing.ErrInvalidCarrier,
+ // opentracing.ErrSpanContextCorrupted, or implementation-specific
+ // errors.
+ //
+ // See Tracer.Inject().
+ Extract(format interface{}, carrier interface{}) (SpanContext, error)
+}
+
+// StartSpanOptions gives Tracer.StartSpan() callers and implementors a
+// mechanism to override the start timestamp, specify Span References, and make
+// a single Tag or multiple Tags available at Span start time.
+//
+// StartSpan() callers should look at the StartSpanOption interface and
+// implementations available in this package.
+//
+// Tracer implementations can convert a slice of `StartSpanOption` instances
+// into a `StartSpanOptions` struct like so:
+//
+// func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
+// sso := opentracing.StartSpanOptions{}
+// for _, o := range opts {
+// o.Apply(&sso)
+// }
+// ...
+// }
+//
+type StartSpanOptions struct {
+ // Zero or more causal references to other Spans (via their SpanContext).
+ // If empty, start a "root" Span (i.e., start a new trace).
+ References []SpanReference
+
+ // StartTime overrides the Span's start time, or implicitly becomes
+ // time.Now() if StartTime.IsZero().
+ StartTime time.Time
+
+ // Tags may have zero or more entries; the restrictions on map values are
+ // identical to those for Span.SetTag(). May be nil.
+ //
+ // If specified, the caller hands off ownership of Tags at
+ // StartSpan() invocation time.
+ Tags map[string]interface{}
+}
+
+// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan.
+//
+// StartSpanOption borrows from the "functional options" pattern, per
+// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type StartSpanOption interface {
+ Apply(*StartSpanOptions)
+}
+
+// SpanReferenceType is an enum type describing different categories of
+// relationships between two Spans. If Span-2 refers to Span-1, the
+// SpanReferenceType describes Span-1 from Span-2's perspective. For example,
+// ChildOfRef means that Span-1 created Span-2.
+//
+// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for
+// completion; e.g., Span-2 may be part of a background job enqueued by Span-1,
+// or Span-2 may be sitting in a distributed queue behind Span-1.
+type SpanReferenceType int
+
+const (
+ // ChildOfRef refers to a parent Span that caused *and* somehow depends
+ // upon the new child Span. Often (but not always), the parent Span cannot
+ // finish until the child Span does.
+ //
+	// A timing diagram for a ChildOfRef that's blocked on the new Span:
+ //
+ // [-Parent Span---------]
+ // [-Child Span----]
+ //
+ // See http://opentracing.io/spec/
+ //
+ // See opentracing.ChildOf()
+ ChildOfRef SpanReferenceType = iota
+
+ // FollowsFromRef refers to a parent Span that does not depend in any way
+ // on the result of the new child Span. For instance, one might use
+ // FollowsFromRefs to describe pipeline stages separated by queues,
+ // or a fire-and-forget cache insert at the tail end of a web request.
+ //
+ // A FollowsFromRef Span is part of the same logical trace as the new Span:
+ // i.e., the new Span is somehow caused by the work of its FollowsFromRef.
+ //
+ // All of the following could be valid timing diagrams for children that
+ // "FollowFrom" a parent.
+ //
+ // [-Parent Span-] [-Child Span-]
+ //
+ //
+ // [-Parent Span--]
+ // [-Child Span-]
+ //
+ //
+ // [-Parent Span-]
+ // [-Child Span-]
+ //
+ // See http://opentracing.io/spec/
+ //
+ // See opentracing.FollowsFrom()
+ FollowsFromRef
+)
+
+// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a
+// referenced SpanContext. See the SpanReferenceType documentation for
+// supported relationships. If SpanReference is created with
+// ReferencedContext==nil, it has no effect. Thus it allows for a more concise
+// syntax for starting spans:
+//
+// sc, _ := tracer.Extract(someFormat, someCarrier)
+// span := tracer.StartSpan("operation", opentracing.ChildOf(sc))
+//
+// The `ChildOf(sc)` option above will not panic if sc == nil; it will simply
+// not add the parent span reference to the options.
+type SpanReference struct {
+ Type SpanReferenceType
+ ReferencedContext SpanContext
+}
+
+// Apply satisfies the StartSpanOption interface.
+func (r SpanReference) Apply(o *StartSpanOptions) {
+ if r.ReferencedContext != nil {
+ o.References = append(o.References, r)
+ }
+}
+
+// ChildOf returns a StartSpanOption pointing to a dependent parent span.
+// If sc == nil, the option has no effect.
+//
+// See ChildOfRef, SpanReference
+func ChildOf(sc SpanContext) SpanReference {
+ return SpanReference{
+ Type: ChildOfRef,
+ ReferencedContext: sc,
+ }
+}
+
+// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused
+// the child Span but does not directly depend on its result in any way.
+// If sc == nil, the option has no effect.
+//
+// See FollowsFromRef, SpanReference
+func FollowsFrom(sc SpanContext) SpanReference {
+ return SpanReference{
+ Type: FollowsFromRef,
+ ReferencedContext: sc,
+ }
+}
+
+// StartTime is a StartSpanOption that sets an explicit start timestamp for the
+// new Span.
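+//
+// Illustrative usage (requestReceivedAt is a hypothetical time.Time value):
+//
+//    sp := tracer.StartSpan("operation_name", opentracing.StartTime(requestReceivedAt))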
+type StartTime time.Time
+
+// Apply satisfies the StartSpanOption interface.
+func (t StartTime) Apply(o *StartSpanOptions) {
+ o.StartTime = time.Time(t)
+}
+
+// Tags are a generic map from an arbitrary string key to an opaque value type.
+// The underlying tracing system is responsible for interpreting and
+// serializing the values.
+type Tags map[string]interface{}
+
+// Apply satisfies the StartSpanOption interface.
+func (t Tags) Apply(o *StartSpanOptions) {
+ if o.Tags == nil {
+ o.Tags = make(map[string]interface{})
+ }
+ for k, v := range t {
+ o.Tags[k] = v
+ }
+}
+
+// Tag may be passed as a StartSpanOption to add a tag to new spans,
+// or its Set method may be used to apply the tag to an existing Span,
+// for example:
+//
+// tracer.StartSpan("opName", Tag{"Key", value})
+//
+// or
+//
+// Tag{"key", value}.Set(span)
+type Tag struct {
+ Key string
+ Value interface{}
+}
+
+// Apply satisfies the StartSpanOption interface.
+func (t Tag) Apply(o *StartSpanOptions) {
+ if o.Tags == nil {
+ o.Tags = make(map[string]interface{})
+ }
+ o.Tags[t.Key] = t.Value
+}
+
+// Set applies the tag to an existing Span.
+func (t Tag) Set(s Span) {
+ s.SetTag(t.Key, t.Value)
+}