From de4c7804101a47a01d0c9b88ea34d2b153e2b6b9 Mon Sep 17 00:00:00 2001
From: Wim
Date: Sun, 10 Apr 2016 23:39:38 +0200
Subject: Vendor libs

---
 .../42wim/matterbridge-plus/bridge/LICENSE | 202 ++
 .../42wim/matterbridge-plus/bridge/bridge.go | 441 ++++
 .../42wim/matterbridge-plus/bridge/config.go | 65 +
 .../42wim/matterbridge-plus/bridge/helper.go | 59 +
 .../42wim/matterbridge-plus/matterclient/LICENSE | 202 ++
 .../matterbridge-plus/matterclient/matterclient.go | 316 +++
 .../42wim/matterbridge/matterhook/LICENSE | 202 ++
 .../42wim/matterbridge/matterhook/matterhook.go | 154 ++
 vendor/github.com/Sirupsen/logrus/LICENSE | 21 +
 vendor/github.com/Sirupsen/logrus/doc.go | 26 +
 vendor/github.com/Sirupsen/logrus/entry.go | 264 ++
 .../Sirupsen/logrus/examples/basic/basic.go | 50 +
 .../Sirupsen/logrus/examples/hook/hook.go | 30 +
 vendor/github.com/Sirupsen/logrus/exported.go | 193 ++
 vendor/github.com/Sirupsen/logrus/formatter.go | 48 +
 .../logrus/formatters/logstash/logstash.go | 61 +
 vendor/github.com/Sirupsen/logrus/hooks.go | 34 +
 .../Sirupsen/logrus/hooks/syslog/syslog.go | 54 +
 .../github.com/Sirupsen/logrus/hooks/test/test.go | 67 +
 .../github.com/Sirupsen/logrus/json_formatter.go | 41 +
 vendor/github.com/Sirupsen/logrus/logger.go | 212 ++
 vendor/github.com/Sirupsen/logrus/logrus.go | 143 ++
 vendor/github.com/Sirupsen/logrus/terminal_bsd.go | 9 +
 .../github.com/Sirupsen/logrus/terminal_linux.go | 12 +
 .../Sirupsen/logrus/terminal_notwindows.go | 21 +
 .../github.com/Sirupsen/logrus/terminal_solaris.go | 15 +
 .../github.com/Sirupsen/logrus/terminal_windows.go | 27 +
 .../github.com/Sirupsen/logrus/text_formatter.go | 161 ++
 vendor/github.com/Sirupsen/logrus/writer.go | 31 +
 vendor/github.com/alecthomas/log4go/LICENSE | 13 +
 vendor/github.com/alecthomas/log4go/config.go | 288 +++
 .../log4go/examples/ConsoleLogWriter_Manual.go | 14 +
 .../log4go/examples/FileLogWriter_Manual.go | 57 +
 .../log4go/examples/SimpleNetLogServer.go | 42 +
 .../log4go/examples/SocketLogWriter_Manual.go | 18 +
 .../log4go/examples/XMLConfigurationExample.go | 13 +
 vendor/github.com/alecthomas/log4go/filelog.go | 264 ++
 vendor/github.com/alecthomas/log4go/log4go.go | 484 ++++
 vendor/github.com/alecthomas/log4go/pattlog.go | 126 +
 vendor/github.com/alecthomas/log4go/socklog.go | 57 +
 vendor/github.com/alecthomas/log4go/termlog.go | 49 +
 vendor/github.com/alecthomas/log4go/wrapper.go | 278 ++
 vendor/github.com/gorilla/schema/LICENSE | 27 +
 vendor/github.com/gorilla/schema/cache.go | 245 ++
 vendor/github.com/gorilla/schema/converter.go | 145 ++
 vendor/github.com/gorilla/schema/decoder.go | 299 +++
 vendor/github.com/gorilla/schema/doc.go | 148 ++
 vendor/github.com/gorilla/websocket/LICENSE | 22 +
 vendor/github.com/gorilla/websocket/client.go | 350 +++
 vendor/github.com/gorilla/websocket/conn.go | 915 +++++++
 vendor/github.com/gorilla/websocket/doc.go | 152 ++
 .../gorilla/websocket/examples/autobahn/server.go | 246 ++
 .../gorilla/websocket/examples/chat/conn.go | 105 +
 .../gorilla/websocket/examples/chat/hub.go | 51 +
 .../gorilla/websocket/examples/chat/main.go | 39 +
 .../gorilla/websocket/examples/command/main.go | 188 ++
 .../gorilla/websocket/examples/echo/client.go | 81 +
 .../gorilla/websocket/examples/echo/server.go | 132 +
 .../gorilla/websocket/examples/filewatch/main.go | 193 ++
 vendor/github.com/gorilla/websocket/json.go | 55 +
 vendor/github.com/gorilla/websocket/server.go | 260 ++
 vendor/github.com/gorilla/websocket/util.go | 44 +
 vendor/github.com/jpillora/backoff/backoff.go | 69 +
.../mattermost/platform/model/LICENSE.txt | 897 +++++++ .../github.com/mattermost/platform/model/access.go | 93 + .../mattermost/platform/model/analytics_row.go | 55 + .../github.com/mattermost/platform/model/audit.go | 39 + .../github.com/mattermost/platform/model/audits.go | 39 + .../mattermost/platform/model/authorize.go | 103 + .../mattermost/platform/model/channel.go | 136 + .../mattermost/platform/model/channel_count.go | 63 + .../mattermost/platform/model/channel_data.go | 43 + .../mattermost/platform/model/channel_extra.go | 49 + .../mattermost/platform/model/channel_list.go | 77 + .../mattermost/platform/model/channel_member.go | 108 + .../github.com/mattermost/platform/model/client.go | 1144 +++++++++ .../mattermost/platform/model/command.go | 153 ++ .../mattermost/platform/model/command_response.go | 41 + .../mattermost/platform/model/compliance.go | 132 + .../mattermost/platform/model/compliance_post.go | 104 + .../github.com/mattermost/platform/model/config.go | 554 ++++ .../github.com/mattermost/platform/model/file.go | 43 + .../mattermost/platform/model/file_info.go | 77 + .../mattermost/platform/model/gitlab/gitlab.go | 107 + .../mattermost/platform/model/incoming_webhook.go | 137 + .../github.com/mattermost/platform/model/ldap.go | 8 + .../mattermost/platform/model/license.go | 123 + .../mattermost/platform/model/message.go | 58 + .../github.com/mattermost/platform/model/oauth.go | 159 ++ .../mattermost/platform/model/outgoing_webhook.go | 151 ++ .../github.com/mattermost/platform/model/post.go | 169 ++ .../mattermost/platform/model/post_list.go | 100 + .../mattermost/platform/model/preference.go | 66 + .../mattermost/platform/model/preferences.go | 31 + .../mattermost/platform/model/push_notification.go | 49 + .../mattermost/platform/model/search_params.go | 170 ++ .../mattermost/platform/model/security_bulletin.go | 55 + .../mattermost/platform/model/session.go | 115 + .../mattermost/platform/model/suggest_command.go | 34 + .../github.com/mattermost/platform/model/system.go | 42 + .../github.com/mattermost/platform/model/team.go | 243 ++ .../mattermost/platform/model/team_signup.go | 40 + .../github.com/mattermost/platform/model/user.go | 481 ++++ .../github.com/mattermost/platform/model/utils.go | 381 +++ .../mattermost/platform/model/version.go | 127 + vendor/github.com/nicksnyder/go-i18n/i18n/LICENSE | 19 + .../nicksnyder/go-i18n/i18n/bundle/bundle.go | 315 +++ vendor/github.com/nicksnyder/go-i18n/i18n/i18n.go | 152 ++ .../go-i18n/i18n/language/codegen/main.go | 132 + .../go-i18n/i18n/language/codegen/xml.go | 143 ++ .../nicksnyder/go-i18n/i18n/language/language.go | 99 + .../nicksnyder/go-i18n/i18n/language/operands.go | 119 + .../nicksnyder/go-i18n/i18n/language/plural.go | 40 + .../nicksnyder/go-i18n/i18n/language/pluralspec.go | 74 + .../go-i18n/i18n/language/pluralspec_gen.go | 567 ++++ .../go-i18n/i18n/translation/plural_translation.go | 78 + .../go-i18n/i18n/translation/single_translation.go | 57 + .../go-i18n/i18n/translation/template.go | 61 + .../go-i18n/i18n/translation/translation.go | 83 + vendor/github.com/pborman/uuid/LICENSE | 27 + vendor/github.com/pborman/uuid/dce.go | 84 + vendor/github.com/pborman/uuid/doc.go | 8 + vendor/github.com/pborman/uuid/hash.go | 53 + vendor/github.com/pborman/uuid/json.go | 34 + vendor/github.com/pborman/uuid/node.go | 117 + vendor/github.com/pborman/uuid/sql.go | 66 + vendor/github.com/pborman/uuid/time.go | 132 + vendor/github.com/pborman/uuid/util.go | 43 + vendor/github.com/pborman/uuid/uuid.go | 201 ++ 
vendor/github.com/pborman/uuid/version1.go | 41 + vendor/github.com/pborman/uuid/version4.go | 25 + vendor/github.com/peterhellberg/giphy/client.go | 117 + .../peterhellberg/giphy/cmd/giphy/main.go | 140 + vendor/github.com/peterhellberg/giphy/env.go | 34 + vendor/github.com/peterhellberg/giphy/errors.go | 17 + vendor/github.com/peterhellberg/giphy/gif.go | 44 + vendor/github.com/peterhellberg/giphy/random.go | 37 + vendor/github.com/peterhellberg/giphy/search.go | 24 + vendor/github.com/peterhellberg/giphy/translate.go | 37 + vendor/github.com/peterhellberg/giphy/trending.go | 23 + vendor/github.com/peterhellberg/giphy/types.go | 116 + vendor/github.com/sorcix/irc/LICENSE | 22 + vendor/github.com/sorcix/irc/constants.go | 298 +++ vendor/github.com/sorcix/irc/ctcp/ctcp.go | 144 ++ vendor/github.com/sorcix/irc/ctcp/doc.go | 31 + vendor/github.com/sorcix/irc/doc.go | 36 + vendor/github.com/sorcix/irc/message.go | 308 +++ vendor/github.com/sorcix/irc/stream.go | 134 + vendor/github.com/sorcix/irc/strings.go | 17 + vendor/github.com/sorcix/irc/strings_legacy.go | 22 + vendor/github.com/thoj/go-ircevent/LICENSE | 27 + vendor/github.com/thoj/go-ircevent/irc.go | 473 ++++ vendor/github.com/thoj/go-ircevent/irc_callback.go | 221 ++ vendor/github.com/thoj/go-ircevent/irc_struct.go | 68 + .../github.com/thoj/go-ircevent/irc_test_fuzz.go | 14 + vendor/golang.org/x/crypto/bcrypt/LICENSE | 27 + vendor/golang.org/x/crypto/bcrypt/base64.go | 35 + vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 294 +++ vendor/golang.org/x/crypto/blowfish/LICENSE | 27 + vendor/golang.org/x/crypto/blowfish/block.go | 159 ++ vendor/golang.org/x/crypto/blowfish/cipher.go | 91 + vendor/golang.org/x/crypto/blowfish/const.go | 199 ++ vendor/golang.org/x/net/http2/hpack/LICENSE | 27 + vendor/golang.org/x/net/http2/hpack/encode.go | 251 ++ vendor/golang.org/x/net/http2/hpack/hpack.go | 542 ++++ vendor/golang.org/x/net/http2/hpack/huffman.go | 190 ++ vendor/golang.org/x/net/http2/hpack/tables.go | 352 +++ vendor/gopkg.in/gcfg.v1/LICENSE | 28 + vendor/gopkg.in/gcfg.v1/doc.go | 118 + vendor/gopkg.in/gcfg.v1/go1_0.go | 7 + vendor/gopkg.in/gcfg.v1/go1_2.go | 9 + vendor/gopkg.in/gcfg.v1/read.go | 188 ++ vendor/gopkg.in/gcfg.v1/scanner/errors.go | 121 + vendor/gopkg.in/gcfg.v1/scanner/scanner.go | 342 +++ vendor/gopkg.in/gcfg.v1/set.go | 293 +++ vendor/gopkg.in/gcfg.v1/token/position.go | 435 ++++ vendor/gopkg.in/gcfg.v1/token/serialize.go | 56 + vendor/gopkg.in/gcfg.v1/token/token.go | 83 + vendor/gopkg.in/gcfg.v1/types/bool.go | 23 + vendor/gopkg.in/gcfg.v1/types/doc.go | 4 + vendor/gopkg.in/gcfg.v1/types/enum.go | 44 + vendor/gopkg.in/gcfg.v1/types/int.go | 86 + vendor/gopkg.in/gcfg.v1/types/scan.go | 23 + vendor/gopkg.in/yaml.v2/LICENSE | 188 ++ vendor/gopkg.in/yaml.v2/apic.go | 742 ++++++ vendor/gopkg.in/yaml.v2/decode.go | 683 +++++ vendor/gopkg.in/yaml.v2/emitterc.go | 1685 ++++++++++++ vendor/gopkg.in/yaml.v2/encode.go | 306 +++ vendor/gopkg.in/yaml.v2/parserc.go | 1096 ++++++++ vendor/gopkg.in/yaml.v2/readerc.go | 394 +++ vendor/gopkg.in/yaml.v2/resolve.go | 203 ++ vendor/gopkg.in/yaml.v2/scannerc.go | 2710 ++++++++++++++++++++ vendor/gopkg.in/yaml.v2/sorter.go | 104 + vendor/gopkg.in/yaml.v2/writerc.go | 89 + vendor/gopkg.in/yaml.v2/yaml.go | 346 +++ vendor/gopkg.in/yaml.v2/yamlh.go | 716 ++++++ vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 ++ vendor/manifest | 146 ++ 198 files changed, 34235 insertions(+) create mode 100644 vendor/github.com/42wim/matterbridge-plus/bridge/LICENSE create mode 100644 
vendor/github.com/42wim/matterbridge-plus/bridge/bridge.go create mode 100644 vendor/github.com/42wim/matterbridge-plus/bridge/config.go create mode 100644 vendor/github.com/42wim/matterbridge-plus/bridge/helper.go create mode 100644 vendor/github.com/42wim/matterbridge-plus/matterclient/LICENSE create mode 100644 vendor/github.com/42wim/matterbridge-plus/matterclient/matterclient.go create mode 100644 vendor/github.com/42wim/matterbridge/matterhook/LICENSE create mode 100644 vendor/github.com/42wim/matterbridge/matterhook/matterhook.go create mode 100644 vendor/github.com/Sirupsen/logrus/LICENSE create mode 100644 vendor/github.com/Sirupsen/logrus/doc.go create mode 100644 vendor/github.com/Sirupsen/logrus/entry.go create mode 100644 vendor/github.com/Sirupsen/logrus/examples/basic/basic.go create mode 100644 vendor/github.com/Sirupsen/logrus/examples/hook/hook.go create mode 100644 vendor/github.com/Sirupsen/logrus/exported.go create mode 100644 vendor/github.com/Sirupsen/logrus/formatter.go create mode 100644 vendor/github.com/Sirupsen/logrus/formatters/logstash/logstash.go create mode 100644 vendor/github.com/Sirupsen/logrus/hooks.go create mode 100644 vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go create mode 100644 vendor/github.com/Sirupsen/logrus/hooks/test/test.go create mode 100644 vendor/github.com/Sirupsen/logrus/json_formatter.go create mode 100644 vendor/github.com/Sirupsen/logrus/logger.go create mode 100644 vendor/github.com/Sirupsen/logrus/logrus.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_bsd.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_linux.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_notwindows.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_solaris.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_windows.go create mode 100644 vendor/github.com/Sirupsen/logrus/text_formatter.go create mode 100644 vendor/github.com/Sirupsen/logrus/writer.go create mode 100644 vendor/github.com/alecthomas/log4go/LICENSE create mode 100644 vendor/github.com/alecthomas/log4go/config.go create mode 100644 vendor/github.com/alecthomas/log4go/examples/ConsoleLogWriter_Manual.go create mode 100644 vendor/github.com/alecthomas/log4go/examples/FileLogWriter_Manual.go create mode 100644 vendor/github.com/alecthomas/log4go/examples/SimpleNetLogServer.go create mode 100644 vendor/github.com/alecthomas/log4go/examples/SocketLogWriter_Manual.go create mode 100644 vendor/github.com/alecthomas/log4go/examples/XMLConfigurationExample.go create mode 100644 vendor/github.com/alecthomas/log4go/filelog.go create mode 100644 vendor/github.com/alecthomas/log4go/log4go.go create mode 100644 vendor/github.com/alecthomas/log4go/pattlog.go create mode 100644 vendor/github.com/alecthomas/log4go/socklog.go create mode 100644 vendor/github.com/alecthomas/log4go/termlog.go create mode 100644 vendor/github.com/alecthomas/log4go/wrapper.go create mode 100644 vendor/github.com/gorilla/schema/LICENSE create mode 100644 vendor/github.com/gorilla/schema/cache.go create mode 100644 vendor/github.com/gorilla/schema/converter.go create mode 100644 vendor/github.com/gorilla/schema/decoder.go create mode 100644 vendor/github.com/gorilla/schema/doc.go create mode 100644 vendor/github.com/gorilla/websocket/LICENSE create mode 100644 vendor/github.com/gorilla/websocket/client.go create mode 100644 vendor/github.com/gorilla/websocket/conn.go create mode 100644 vendor/github.com/gorilla/websocket/doc.go create mode 100644 
vendor/github.com/gorilla/websocket/examples/autobahn/server.go create mode 100644 vendor/github.com/gorilla/websocket/examples/chat/conn.go create mode 100644 vendor/github.com/gorilla/websocket/examples/chat/hub.go create mode 100644 vendor/github.com/gorilla/websocket/examples/chat/main.go create mode 100644 vendor/github.com/gorilla/websocket/examples/command/main.go create mode 100644 vendor/github.com/gorilla/websocket/examples/echo/client.go create mode 100644 vendor/github.com/gorilla/websocket/examples/echo/server.go create mode 100644 vendor/github.com/gorilla/websocket/examples/filewatch/main.go create mode 100644 vendor/github.com/gorilla/websocket/json.go create mode 100644 vendor/github.com/gorilla/websocket/server.go create mode 100644 vendor/github.com/gorilla/websocket/util.go create mode 100644 vendor/github.com/jpillora/backoff/backoff.go create mode 100644 vendor/github.com/mattermost/platform/model/LICENSE.txt create mode 100644 vendor/github.com/mattermost/platform/model/access.go create mode 100644 vendor/github.com/mattermost/platform/model/analytics_row.go create mode 100644 vendor/github.com/mattermost/platform/model/audit.go create mode 100644 vendor/github.com/mattermost/platform/model/audits.go create mode 100644 vendor/github.com/mattermost/platform/model/authorize.go create mode 100644 vendor/github.com/mattermost/platform/model/channel.go create mode 100644 vendor/github.com/mattermost/platform/model/channel_count.go create mode 100644 vendor/github.com/mattermost/platform/model/channel_data.go create mode 100644 vendor/github.com/mattermost/platform/model/channel_extra.go create mode 100644 vendor/github.com/mattermost/platform/model/channel_list.go create mode 100644 vendor/github.com/mattermost/platform/model/channel_member.go create mode 100644 vendor/github.com/mattermost/platform/model/client.go create mode 100644 vendor/github.com/mattermost/platform/model/command.go create mode 100644 vendor/github.com/mattermost/platform/model/command_response.go create mode 100644 vendor/github.com/mattermost/platform/model/compliance.go create mode 100644 vendor/github.com/mattermost/platform/model/compliance_post.go create mode 100644 vendor/github.com/mattermost/platform/model/config.go create mode 100644 vendor/github.com/mattermost/platform/model/file.go create mode 100644 vendor/github.com/mattermost/platform/model/file_info.go create mode 100644 vendor/github.com/mattermost/platform/model/gitlab/gitlab.go create mode 100644 vendor/github.com/mattermost/platform/model/incoming_webhook.go create mode 100644 vendor/github.com/mattermost/platform/model/ldap.go create mode 100644 vendor/github.com/mattermost/platform/model/license.go create mode 100644 vendor/github.com/mattermost/platform/model/message.go create mode 100644 vendor/github.com/mattermost/platform/model/oauth.go create mode 100644 vendor/github.com/mattermost/platform/model/outgoing_webhook.go create mode 100644 vendor/github.com/mattermost/platform/model/post.go create mode 100644 vendor/github.com/mattermost/platform/model/post_list.go create mode 100644 vendor/github.com/mattermost/platform/model/preference.go create mode 100644 vendor/github.com/mattermost/platform/model/preferences.go create mode 100644 vendor/github.com/mattermost/platform/model/push_notification.go create mode 100644 vendor/github.com/mattermost/platform/model/search_params.go create mode 100644 vendor/github.com/mattermost/platform/model/security_bulletin.go create mode 100644 
vendor/github.com/mattermost/platform/model/session.go create mode 100644 vendor/github.com/mattermost/platform/model/suggest_command.go create mode 100644 vendor/github.com/mattermost/platform/model/system.go create mode 100644 vendor/github.com/mattermost/platform/model/team.go create mode 100644 vendor/github.com/mattermost/platform/model/team_signup.go create mode 100644 vendor/github.com/mattermost/platform/model/user.go create mode 100644 vendor/github.com/mattermost/platform/model/utils.go create mode 100644 vendor/github.com/mattermost/platform/model/version.go create mode 100644 vendor/github.com/nicksnyder/go-i18n/i18n/LICENSE create mode 100644 vendor/github.com/nicksnyder/go-i18n/i18n/bundle/bundle.go create mode 100644 vendor/github.com/nicksnyder/go-i18n/i18n/i18n.go create mode 100644 vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/main.go create mode 100644 vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/xml.go create mode 100644 vendor/github.com/nicksnyder/go-i18n/i18n/language/language.go create mode 100644 vendor/github.com/nicksnyder/go-i18n/i18n/language/operands.go create mode 100644 vendor/github.com/nicksnyder/go-i18n/i18n/language/plural.go create mode 100644 vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec.go create mode 100644 vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen.go create mode 100644 vendor/github.com/nicksnyder/go-i18n/i18n/translation/plural_translation.go create mode 100644 vendor/github.com/nicksnyder/go-i18n/i18n/translation/single_translation.go create mode 100644 vendor/github.com/nicksnyder/go-i18n/i18n/translation/template.go create mode 100644 vendor/github.com/nicksnyder/go-i18n/i18n/translation/translation.go create mode 100644 vendor/github.com/pborman/uuid/LICENSE create mode 100644 vendor/github.com/pborman/uuid/dce.go create mode 100644 vendor/github.com/pborman/uuid/doc.go create mode 100644 vendor/github.com/pborman/uuid/hash.go create mode 100644 vendor/github.com/pborman/uuid/json.go create mode 100644 vendor/github.com/pborman/uuid/node.go create mode 100644 vendor/github.com/pborman/uuid/sql.go create mode 100644 vendor/github.com/pborman/uuid/time.go create mode 100644 vendor/github.com/pborman/uuid/util.go create mode 100644 vendor/github.com/pborman/uuid/uuid.go create mode 100644 vendor/github.com/pborman/uuid/version1.go create mode 100644 vendor/github.com/pborman/uuid/version4.go create mode 100644 vendor/github.com/peterhellberg/giphy/client.go create mode 100644 vendor/github.com/peterhellberg/giphy/cmd/giphy/main.go create mode 100644 vendor/github.com/peterhellberg/giphy/env.go create mode 100644 vendor/github.com/peterhellberg/giphy/errors.go create mode 100644 vendor/github.com/peterhellberg/giphy/gif.go create mode 100644 vendor/github.com/peterhellberg/giphy/random.go create mode 100644 vendor/github.com/peterhellberg/giphy/search.go create mode 100644 vendor/github.com/peterhellberg/giphy/translate.go create mode 100644 vendor/github.com/peterhellberg/giphy/trending.go create mode 100644 vendor/github.com/peterhellberg/giphy/types.go create mode 100644 vendor/github.com/sorcix/irc/LICENSE create mode 100644 vendor/github.com/sorcix/irc/constants.go create mode 100644 vendor/github.com/sorcix/irc/ctcp/ctcp.go create mode 100644 vendor/github.com/sorcix/irc/ctcp/doc.go create mode 100644 vendor/github.com/sorcix/irc/doc.go create mode 100644 vendor/github.com/sorcix/irc/message.go create mode 100644 vendor/github.com/sorcix/irc/stream.go create mode 100644 
vendor/github.com/sorcix/irc/strings.go create mode 100644 vendor/github.com/sorcix/irc/strings_legacy.go create mode 100644 vendor/github.com/thoj/go-ircevent/LICENSE create mode 100644 vendor/github.com/thoj/go-ircevent/irc.go create mode 100644 vendor/github.com/thoj/go-ircevent/irc_callback.go create mode 100644 vendor/github.com/thoj/go-ircevent/irc_struct.go create mode 100644 vendor/github.com/thoj/go-ircevent/irc_test_fuzz.go create mode 100644 vendor/golang.org/x/crypto/bcrypt/LICENSE create mode 100644 vendor/golang.org/x/crypto/bcrypt/base64.go create mode 100644 vendor/golang.org/x/crypto/bcrypt/bcrypt.go create mode 100644 vendor/golang.org/x/crypto/blowfish/LICENSE create mode 100644 vendor/golang.org/x/crypto/blowfish/block.go create mode 100644 vendor/golang.org/x/crypto/blowfish/cipher.go create mode 100644 vendor/golang.org/x/crypto/blowfish/const.go create mode 100644 vendor/golang.org/x/net/http2/hpack/LICENSE create mode 100644 vendor/golang.org/x/net/http2/hpack/encode.go create mode 100644 vendor/golang.org/x/net/http2/hpack/hpack.go create mode 100644 vendor/golang.org/x/net/http2/hpack/huffman.go create mode 100644 vendor/golang.org/x/net/http2/hpack/tables.go create mode 100644 vendor/gopkg.in/gcfg.v1/LICENSE create mode 100644 vendor/gopkg.in/gcfg.v1/doc.go create mode 100644 vendor/gopkg.in/gcfg.v1/go1_0.go create mode 100644 vendor/gopkg.in/gcfg.v1/go1_2.go create mode 100644 vendor/gopkg.in/gcfg.v1/read.go create mode 100644 vendor/gopkg.in/gcfg.v1/scanner/errors.go create mode 100644 vendor/gopkg.in/gcfg.v1/scanner/scanner.go create mode 100644 vendor/gopkg.in/gcfg.v1/set.go create mode 100644 vendor/gopkg.in/gcfg.v1/token/position.go create mode 100644 vendor/gopkg.in/gcfg.v1/token/serialize.go create mode 100644 vendor/gopkg.in/gcfg.v1/token/token.go create mode 100644 vendor/gopkg.in/gcfg.v1/types/bool.go create mode 100644 vendor/gopkg.in/gcfg.v1/types/doc.go create mode 100644 vendor/gopkg.in/gcfg.v1/types/enum.go create mode 100644 vendor/gopkg.in/gcfg.v1/types/int.go create mode 100644 vendor/gopkg.in/gcfg.v1/types/scan.go create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE create mode 100644 vendor/gopkg.in/yaml.v2/apic.go create mode 100644 vendor/gopkg.in/yaml.v2/decode.go create mode 100644 vendor/gopkg.in/yaml.v2/emitterc.go create mode 100644 vendor/gopkg.in/yaml.v2/encode.go create mode 100644 vendor/gopkg.in/yaml.v2/parserc.go create mode 100644 vendor/gopkg.in/yaml.v2/readerc.go create mode 100644 vendor/gopkg.in/yaml.v2/resolve.go create mode 100644 vendor/gopkg.in/yaml.v2/scannerc.go create mode 100644 vendor/gopkg.in/yaml.v2/sorter.go create mode 100644 vendor/gopkg.in/yaml.v2/writerc.go create mode 100644 vendor/gopkg.in/yaml.v2/yaml.go create mode 100644 vendor/gopkg.in/yaml.v2/yamlh.go create mode 100644 vendor/gopkg.in/yaml.v2/yamlprivateh.go create mode 100644 vendor/manifest diff --git a/vendor/github.com/42wim/matterbridge-plus/bridge/LICENSE b/vendor/github.com/42wim/matterbridge-plus/bridge/LICENSE new file mode 100644 index 00000000..8f71f43f --- /dev/null +++ b/vendor/github.com/42wim/matterbridge-plus/bridge/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
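A minimal driver sketch for the bridge package vendored below. NewConfig,
NewBridge and the kind argument are as defined in config.go and bridge.go;
the flag name and config path are hypothetical:

	package main

	import (
		"flag"

		"github.com/42wim/matterbridge-plus/bridge"
	)

	func main() {
		flagConfig := flag.String("conf", "matterbridge.conf", "config file") // hypothetical flag and path
		flag.Parse()
		// kind "legacy" selects the incoming-webhook mode; anything else talks to
		// the Mattermost API through the vendored matterclient package.
		bridge.NewBridge("matterbridge", bridge.NewConfig(*flagConfig), "")
		select {} // NewBridge runs its IRC and Mattermost loops in goroutines
	}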
+
diff --git a/vendor/github.com/42wim/matterbridge-plus/bridge/bridge.go b/vendor/github.com/42wim/matterbridge-plus/bridge/bridge.go
new file mode 100644
index 00000000..ebd5456d
--- /dev/null
+++ b/vendor/github.com/42wim/matterbridge-plus/bridge/bridge.go
@@ -0,0 +1,441 @@
+package bridge
+
+import (
+	"crypto/tls"
+	"github.com/42wim/matterbridge-plus/matterclient"
+	"github.com/42wim/matterbridge/matterhook"
+	log "github.com/Sirupsen/logrus"
+	"github.com/peterhellberg/giphy"
+	ircm "github.com/sorcix/irc"
+	"github.com/thoj/go-ircevent"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+//type Bridge struct {
+type MMhook struct {
+	mh *matterhook.Client
+}
+
+type MMapi struct {
+	mc    *matterclient.MMClient
+	mmMap map[string]string
+}
+
+type MMirc struct {
+	i       *irc.Connection
+	ircNick string
+	ircMap  map[string]string
+	names   map[string][]string
+}
+
+type MMMessage struct {
+	Text     string
+	Channel  string
+	Username string
+}
+
+type Bridge struct {
+	MMhook
+	MMapi
+	MMirc
+	*Config
+	kind string
+}
+
+type FancyLog struct {
+	irc *log.Entry
+	mm  *log.Entry
+}
+
+var flog FancyLog
+
+func initFLog() {
+	flog.irc = log.WithFields(log.Fields{"module": "irc"})
+	flog.mm = log.WithFields(log.Fields{"module": "mattermost"})
+}
+
+func NewBridge(name string, config *Config, kind string) *Bridge {
+	initFLog()
+	b := &Bridge{}
+	b.Config = config
+	b.kind = kind
+	b.ircNick = b.Config.IRC.Nick
+	b.ircMap = make(map[string]string)
+	b.MMirc.names = make(map[string][]string)
+	if kind == "legacy" {
+		if len(b.Config.Token) > 0 {
+			for _, val := range b.Config.Token {
+				b.ircMap[val.IRCChannel] = val.MMChannel
+			}
+		}
+
+		b.mh = matterhook.New(b.Config.Mattermost.URL,
+			matterhook.Config{Port: b.Config.Mattermost.Port, Token: b.Config.Mattermost.Token,
+				InsecureSkipVerify: b.Config.Mattermost.SkipTLSVerify,
+				BindAddress:        b.Config.Mattermost.BindAddress})
+	} else {
+		b.mmMap = make(map[string]string)
+		if len(b.Config.Channel) > 0 {
+			for _, val := range b.Config.Channel {
+				b.ircMap[val.IRC] = val.Mattermost
+				b.mmMap[val.Mattermost] = val.IRC
+			}
+		}
+		b.mc = matterclient.New(b.Config.Mattermost.Login, b.Config.Mattermost.Password,
+			b.Config.Mattermost.Team, b.Config.Mattermost.Server)
+		err := b.mc.Login()
+		if err != nil {
+			flog.mm.Fatal("cannot connect: ", err)
+		}
+		b.mc.JoinChannel(b.Config.Mattermost.Channel)
+		if len(b.Config.Channel) > 0 {
+			for _, val := range b.Config.Channel {
+				b.mc.JoinChannel(val.Mattermost)
+			}
+		}
+		go b.mc.WsReceiver()
+	}
+	b.i = b.createIRC(name)
+	go b.handleMatter()
+	return b
+}
+
+func (b *Bridge) createIRC(name string) *irc.Connection {
+	i := irc.IRC(b.Config.IRC.Nick, b.Config.IRC.Nick)
+	i.UseTLS = b.Config.IRC.UseTLS
+	i.TLSConfig = &tls.Config{InsecureSkipVerify: b.Config.IRC.SkipTLSVerify}
+	if b.Config.IRC.Password != "" {
+		i.Password = b.Config.IRC.Password
+	}
+	i.AddCallback("*", b.handleOther)
+	// Connect can fail (bad server or port); bail out instead of running without IRC.
+	err := i.Connect(b.Config.IRC.Server + ":" + strconv.Itoa(b.Config.IRC.Port))
+	if err != nil {
+		flog.irc.Fatal(err)
+	}
+	return i
+}
+
+func (b *Bridge) handleNewConnection(event *irc.Event) {
+	b.ircNick = event.Arguments[0]
+	b.setupChannels()
+}
+
+func (b *Bridge) setupChannels() {
+	i := b.i
+	flog.irc.Info("Joining ", b.Config.IRC.Channel, " as ", b.ircNick)
+	i.Join(b.Config.IRC.Channel)
+	if b.kind == "legacy" {
+		for _, val := range b.Config.Token {
+			flog.irc.Info("Joining ", val.IRCChannel, " as ", b.ircNick)
+			i.Join(val.IRCChannel)
+		}
+	} else {
+		for _, val := range b.Config.Channel {
+			flog.irc.Info("Joining ", val.IRC, " as ", b.ircNick)
+			i.Join(val.IRC)
+		}
+	}
+	i.AddCallback("PRIVMSG", b.handlePrivMsg)
+	i.AddCallback("CTCP_ACTION", b.handlePrivMsg)
+	if b.Config.Mattermost.ShowJoinPart {
+		i.AddCallback("JOIN", b.handleJoinPart)
+		i.AddCallback("PART", b.handleJoinPart)
+	}
+}
+
+// handleIrcBotCommand answers "<botnick>: <command>" queries on IRC and
+// reports whether the event was consumed as a command.
+func (b *Bridge) handleIrcBotCommand(event *irc.Event) bool {
+	parts := strings.Fields(event.Message())
+	if len(parts) == 0 {
+		// guard: an empty PRIVMSG would otherwise panic on parts[0] below
+		return false
+	}
+	exp := regexp.MustCompile("[:,]+$")
+	channel := event.Arguments[0]
+	command := ""
+	if len(parts) == 2 {
+		command = parts[1]
+	}
+	if exp.ReplaceAllString(parts[0], "") == b.ircNick {
+		switch command {
+		case "users":
+			usernames := b.mc.UsernamesInChannel(b.getMMChannel(channel))
+			sort.Strings(usernames)
+			b.i.Privmsg(channel, "Users on Mattermost: "+strings.Join(usernames, ", "))
+		default:
+			b.i.Privmsg(channel, "Valid commands are: [users, help]")
+		}
+		return true
+	}
+	return false
+}
+
+func (b *Bridge) ircNickFormat(nick string) string {
+	if nick == b.ircNick {
+		return nick
+	}
+	if b.Config.Mattermost.RemoteNickFormat == nil {
+		return "irc-" + nick
+	}
+	return strings.Replace(*b.Config.Mattermost.RemoteNickFormat, "{NICK}", nick, -1)
+}
+
+func (b *Bridge) handlePrivMsg(event *irc.Event) {
+	if b.handleIrcBotCommand(event) {
+		return
+	}
+	msg := ""
+	if event.Code == "CTCP_ACTION" {
+		msg = event.Nick + " "
+	}
+	msg += event.Message()
+	b.Send(b.ircNickFormat(event.Nick), msg, b.getMMChannel(event.Arguments[0]))
+}
+
+func (b *Bridge) handleJoinPart(event *irc.Event) {
+	b.Send(b.ircNick, b.ircNickFormat(event.Nick)+" "+strings.ToLower(event.Code)+"s "+event.Message(), b.getMMChannel(event.Arguments[0]))
+}
+
+func (b *Bridge) handleNotice(event *irc.Event) {
+	if strings.Contains(event.Message(), "This nickname is registered") {
+		b.i.Privmsg(b.Config.IRC.NickServNick, "IDENTIFY "+b.Config.IRC.NickServPassword)
+	}
+}
+
+func (b *Bridge) nicksPerRow() int {
+	if b.Config.Mattermost.NicksPerRow < 1 {
+		return 4
+	}
+	return b.Config.Mattermost.NicksPerRow
+}
+
+func (b *Bridge) formatnicks(nicks []string, continued bool) string {
+	switch b.Config.Mattermost.NickFormatter {
+	case "table":
+		return tableformatter(nicks, b.nicksPerRow(), continued)
+	default:
+		return plainformatter(nicks, b.nicksPerRow())
+	}
+}
+
+// storeNames accumulates RPL_NAMREPLY lines; the NAMES listing of a busy
+// channel arrives split over several replies.
+func (b *Bridge) storeNames(event *irc.Event) {
+	channel := event.Arguments[2]
+	b.MMirc.names[channel] = append(
+		b.MMirc.names[channel],
+		strings.Split(strings.TrimSpace(event.Message()), " ")...)
+}
+
+// endNames flushes the collected NAMES list to Mattermost in chunks of at
+// most ~300 nicks, rounded down to a whole number of table rows.
+func (b *Bridge) endNames(event *irc.Event) {
+	channel := event.Arguments[1]
+	sort.Strings(b.MMirc.names[channel])
+	maxNamesPerPost := (300 / b.nicksPerRow()) * b.nicksPerRow()
+	continued := false
+	for len(b.MMirc.names[channel]) > maxNamesPerPost {
+		b.Send(
+			b.ircNick,
+			b.formatnicks(b.MMirc.names[channel][0:maxNamesPerPost], continued),
+			b.getMMChannel(channel))
+		b.MMirc.names[channel] = b.MMirc.names[channel][maxNamesPerPost:]
+		continued = true
+	}
+	b.Send(b.ircNick, b.formatnicks(b.MMirc.names[channel], continued), b.getMMChannel(channel))
+	b.MMirc.names[channel] = nil
+}
+
+func (b *Bridge) handleTopicWhoTime(event *irc.Event) bool {
+	parts := strings.Split(event.Arguments[2], "!")
+	t_i, err := strconv.ParseInt(event.Arguments[3], 10, 64)
+	if err != nil {
+		flog.irc.Errorf("Invalid time stamp: %s", event.Arguments[3])
+		return false
+	}
+	user := parts[0]
+	if len(parts) > 1 {
+		user += " [" + parts[1] + "]"
+	}
+	flog.irc.Infof("%s: Topic set by %s [%s]", event.Code, user, time.Unix(t_i, 0))
+	return true
+}
+
+func (b *Bridge) handleOther(event *irc.Event) {
+	flog.irc.Debugf("%#v", event)
+	switch event.Code {
+	case ircm.RPL_WELCOME:
+		b.handleNewConnection(event)
+	case ircm.RPL_ENDOFNAMES:
+		b.endNames(event)
+	case ircm.RPL_NAMREPLY:
+		b.storeNames(event)
+	case ircm.RPL_ISUPPORT:
+		fallthrough
+	case ircm.RPL_LUSEROP:
+		fallthrough
+	case ircm.RPL_LUSERUNKNOWN:
+		fallthrough
+	case ircm.RPL_LUSERCHANNELS:
+		fallthrough
+	case ircm.RPL_MYINFO:
+		flog.irc.Infof("%s: %s", event.Code, strings.Join(event.Arguments[1:], " "))
+	case ircm.RPL_YOURHOST:
+		fallthrough
+	case ircm.RPL_CREATED:
+		fallthrough
+	case ircm.RPL_STATSDLINE:
+		fallthrough
+	case ircm.RPL_LUSERCLIENT:
+		fallthrough
+	case ircm.RPL_LUSERME:
+		fallthrough
+	case ircm.RPL_LOCALUSERS:
+		fallthrough
+	case ircm.RPL_GLOBALUSERS:
+		fallthrough
+	case ircm.RPL_MOTD:
+		flog.irc.Infof("%s: %s", event.Code, event.Message())
+		// flog.irc.Info(event.Message())
+	case ircm.RPL_TOPIC:
+		flog.irc.Infof("%s: Topic for %s: %s", event.Code, event.Arguments[1], event.Message())
+	case ircm.RPL_TOPICWHOTIME:
+		if !b.handleTopicWhoTime(event) {
+			break
+		}
+	case ircm.MODE:
+		flog.irc.Infof("%s: %s %s", event.Code, event.Arguments[1], event.Arguments[0])
+	case ircm.JOIN:
+		fallthrough
+	case ircm.PING:
+		fallthrough
+	case ircm.PONG:
+		flog.irc.Infof("%s: %s", event.Code, event.Message())
+	case ircm.RPL_ENDOFMOTD:
+	case ircm.RPL_MOTDSTART:
+	case ircm.ERR_NICKNAMEINUSE:
+		flog.irc.Warn(event.Message())
+	case ircm.NOTICE:
+		b.handleNotice(event)
+	default:
+		flog.irc.Infof("UNKNOWN EVENT: %#v", event)
+		return
+	}
+}
+
+func (b *Bridge) Send(nick string, message string, channel string) error {
+	return b.SendType(nick, message, channel, "")
+}
+
+func (b *Bridge) SendType(nick string, message string, channel string, mtype string) error {
+	if b.Config.Mattermost.PrefixMessagesWithNick {
+		if IsMarkup(message) {
+			message = nick + "\n\n" + message
+		} else {
+			message = nick + " " + message
+		}
+	}
+	if b.kind == "legacy" {
+		matterMessage := matterhook.OMessage{IconURL: b.Config.Mattermost.IconURL}
+		matterMessage.Channel = channel
+		matterMessage.UserName = nick
+		matterMessage.Type = mtype
+		matterMessage.Text = message
+		err := b.mh.Send(matterMessage)
+		if err != nil {
+			flog.mm.Info(err)
+			return err
+		}
+		return nil
+	}
+	flog.mm.Debug("->mattermost channel: ", channel, " ", message)
+	b.mc.PostMessage(channel, message)
+	return nil
+}
+
+// handleMatterHook drains the legacy incoming-webhook receiver; the webhook
+// token doubles as the channel key.
+func (b *Bridge) handleMatterHook(mchan chan *MMMessage) {
+	for {
+		message := b.mh.Receive()
+		m := &MMMessage{}
+		m.Username = message.UserName
+		m.Text = message.Text
+		m.Channel = message.Token
+		mchan <- m
+	}
+}
+
+func (b *Bridge) handleMatterClient(mchan chan *MMMessage) {
+	for message := range b.mc.MessageChan {
+		// do not post our own messages back to irc
+		if message.Raw.Action == "posted" && b.mc.User.Username != message.Username {
+			m := &MMMessage{}
+			m.Username = message.Username
+			m.Channel = message.Channel
+			m.Text = message.Text
+			flog.mm.Debugf("<-mattermost channel: %s %#v %#v", message.Channel, message.Post, message.Raw)
+			mchan <- m
+		}
+	}
+}
+
+func (b *Bridge) handleMatter() {
+	mchan := make(chan *MMMessage)
+	if b.kind == "legacy" {
+		go b.handleMatterHook(mchan)
+	} else {
+		go b.handleMatterClient(mchan)
+	}
+	for message := range mchan {
+		username := message.Username + ": "
+		if b.Config.IRC.RemoteNickFormat != "" {
+			username = strings.Replace(b.Config.IRC.RemoteNickFormat, "{NICK}", message.Username, -1)
+		} else if b.Config.IRC.UseSlackCircumfix {
+			username = "<" + message.Username + "> "
+		}
+		fields := strings.Fields(message.Text)
+		if len(fields) == 0 {
+			// guard: an empty or whitespace-only post would otherwise panic below
+			continue
+		}
+		cmd := fields[0]
+		switch cmd {
+		case "!users":
+			flog.mm.Info("received !users from ", message.Username)
+			b.i.SendRaw("NAMES " + b.getIRCChannel(message.Channel))
+			continue
+		case "!gif":
+			message.Text = b.giphyRandom(strings.Fields(strings.Replace(message.Text, "!gif ", "", 1)))
+			b.Send(b.ircNick, message.Text, b.getIRCChannel(message.Channel))
+			continue
+		}
+		texts := strings.Split(message.Text, "\n")
+		for _, text := range texts {
+			flog.mm.Debug("Sending message from " + message.Username + " to " + message.Channel)
+			b.i.Privmsg(b.getIRCChannel(message.Channel), username+text)
+		}
+	}
+}
+
+func (b *Bridge) giphyRandom(query []string) string {
+	g := giphy.DefaultClient
+	if b.Config.General.GiphyAPIKey != "" {
+		g.APIKey = b.Config.General.GiphyAPIKey
+	}
+	res, err := g.Random(query)
+	if err != nil {
+		return "error"
+	}
+	return res.Data.FixedHeightDownsampledURL
+}
+
+func (b *Bridge) getMMChannel(ircChannel string) string {
+	mmchannel, ok := b.ircMap[ircChannel]
+	if !ok {
+		mmchannel = b.Config.Mattermost.Channel
+	}
+	return mmchannel
+}
+
+func (b *Bridge) getIRCChannel(channel string) string {
+	if b.kind == "legacy" {
+		ircchannel := b.Config.IRC.Channel
+		_, ok := b.Config.Token[channel]
+		if ok {
+			ircchannel = b.Config.Token[channel].IRCChannel
+		}
+		return ircchannel
+	}
+	ircchannel, ok := b.mmMap[channel]
+	if !ok {
+		ircchannel = b.Config.IRC.Channel
+	}
+	return ircchannel
+}
diff --git a/vendor/github.com/42wim/matterbridge-plus/bridge/config.go b/vendor/github.com/42wim/matterbridge-plus/bridge/config.go
new file mode 100644
index 00000000..ca4eb225
--- /dev/null
+++ b/vendor/github.com/42wim/matterbridge-plus/bridge/config.go
@@ -0,0 +1,65 @@
+package bridge
+
+import (
+	"gopkg.in/gcfg.v1"
+	"io/ioutil"
+	"log"
+)
+
+type Config struct {
+	IRC struct {
+		UseTLS            bool
+		SkipTLSVerify     bool
+		Server            string
+		Port              int
+		Nick              string
+		Password          string
+		Channel           string
+		UseSlackCircumfix bool
+		NickServNick      string
+		NickServPassword  string
+		RemoteNickFormat  string
+	}
+	Mattermost struct {
+		URL                    string
+		Port                   int
+		ShowJoinPart           bool
+		Token                  string
+		IconURL                string
+		SkipTLSVerify          bool
+		BindAddress            string
+		Channel                string
+		PrefixMessagesWithNick bool
+		NicksPerRow            int
+		NickFormatter          string
+		Server                 string
+		Team                   string
+		Login                  string
+		Password               string
+		RemoteNickFormat       *string
+	}
+	Token map[string]*struct {
+		IRCChannel string
+		MMChannel  string
+	}
+	Channel map[string]*struct {
+		IRC        string
+		Mattermost string
+	}
+	General struct {
+		GiphyAPIKey string
+	}
+}
+
+// NewConfig reads and parses cfgfile. gcfg expects git-config/INI style
+// syntax; a hypothetical minimal config (section and variable names match
+// the struct fields above, and the map fields Token and Channel become
+// repeatable subsections):
+//
+//	[IRC]
+//	server="irc.example.net"
+//	port=6667
+//	nick="matterbot"
+//	channel="#testing"
+//
+//	[mattermost]
+//	server="mattermost.example.com"
+//	team="myteam"
+//	login="user@example.com"
+//	password="secret"
+//	channel="town-square"
+//
+//	[channel "one"]
+//	irc="#testing"
+//	mattermost="town-square"
+func NewConfig(cfgfile string) *Config {
+	var cfg Config
+	content, err := ioutil.ReadFile(cfgfile)
+	if err != nil {
+		log.Fatal(err)
+	}
+	err = gcfg.ReadStringInto(&cfg, string(content))
+	if err != nil {
+		log.Fatal("Failed to parse "+cfgfile+":", err)
+	}
+	return &cfg
+}
diff --git a/vendor/github.com/42wim/matterbridge-plus/bridge/helper.go b/vendor/github.com/42wim/matterbridge-plus/bridge/helper.go
new file mode 100644
index 00000000..7669ad57
--- /dev/null
+++ b/vendor/github.com/42wim/matterbridge-plus/bridge/helper.go
@@ -0,0 +1,59 @@
+package bridge
+
+import (
+	"strings"
+)
+
+// tableformatter renders a nick list as a Markdown table with nicksPerRow columns.
+func tableformatter(nicks []string, nicksPerRow int, continued bool) string {
+	result := "|IRC users"
+	if continued {
+		result = "|(continued)"
+	}
+	for i := 0; i < 2; i++ {
+		for j := 1; j <= nicksPerRow && j <= len(nicks); j++ {
+			if i == 0 {
+				result += "|"
+			} else {
+				result += ":-|"
+			}
+		}
+		result += "\r\n|"
+	}
+	if len(nicks) == 0 {
+		// guard: an empty list would otherwise panic on nicks[0] below
+		return result
+	}
+	result += nicks[0] + "|"
+	for i := 1; i < len(nicks); i++ {
+		if i%nicksPerRow == 0 {
+			result += "\r\n|" + nicks[i] + "|"
+		} else {
+			result += nicks[i] + "|"
+		}
+	}
+	return result
+}
+
+func plainformatter(nicks []string, nicksPerRow int) string {
+	return strings.Join(nicks, ", ") + " currently on IRC"
+}
+
+// IsMarkup reports whether a message starts with a character that Mattermost
+// treats as markup, in which case the nick prefix goes on its own line.
+func IsMarkup(message string) bool {
+	if len(message) == 0 {
+		return false
+	}
+	switch message[0] {
+	case '|', '#', '_', '*', '~', '-', ':', '>', '=':
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/42wim/matterbridge-plus/matterclient/LICENSE b/vendor/github.com/42wim/matterbridge-plus/matterclient/LICENSE
new file mode 100644
index 00000000..8f71f43f
--- /dev/null
+++ b/vendor/github.com/42wim/matterbridge-plus/matterclient/LICENSE
@@ -0,0 +1,202 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
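For orientation before the vendored file that follows: a minimal usage sketch of the matterclient API as defined below. This is a sketch, not part of the vendored code; the credentials, server address, and channel name are placeholders.

    package main

    import (
        "log"

        "github.com/42wim/matterbridge-plus/matterclient"
    )

    func main() {
        // Placeholder credentials and server; NoTLS defaults to false (https/wss).
        m := matterclient.New("user@example.com", "secret", "myteam", "mattermost.example.com")
        m.SetLogLevel("info")
        if err := m.Login(); err != nil {
            log.Fatal(err)
        }
        go m.WsReceiver() // feeds m.MessageChan from the websocket
        if err := m.JoinChannel("town-square"); err != nil {
            log.Println(err)
        }
        m.PostMessage("town-square", "hello from matterclient")
        for msg := range m.MessageChan {
            log.Printf("[%s] <%s> %s", msg.Channel, msg.Username, msg.Text)
        }
    }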
+ diff --git a/vendor/github.com/42wim/matterbridge-plus/matterclient/matterclient.go b/vendor/github.com/42wim/matterbridge-plus/matterclient/matterclient.go new file mode 100644 index 00000000..11891d07 --- /dev/null +++ b/vendor/github.com/42wim/matterbridge-plus/matterclient/matterclient.go @@ -0,0 +1,316 @@ +package matterclient + +import ( + "errors" + log "github.com/Sirupsen/logrus" + "net/http" + "strings" + "time" + + "github.com/gorilla/websocket" + "github.com/jpillora/backoff" + "github.com/mattermost/platform/model" +) + +type Credentials struct { + Login string + Team string + Pass string + Server string + NoTLS bool +} + +type Message struct { + Raw *model.Message + Post *model.Post + Team string + Channel string + Username string + Text string +} + +type MMClient struct { + *Credentials + Client *model.Client + WsClient *websocket.Conn + Channels *model.ChannelList + MoreChannels *model.ChannelList + User *model.User + Users map[string]*model.User + MessageChan chan *Message + Team *model.Team + log *log.Entry +} + +func New(login, pass, team, server string) *MMClient { + cred := &Credentials{Login: login, Pass: pass, Team: team, Server: server} + mmclient := &MMClient{Credentials: cred, MessageChan: make(chan *Message, 100)} + mmclient.log = log.WithFields(log.Fields{"module": "matterclient"}) + log.SetFormatter(&log.TextFormatter{FullTimestamp: true}) + return mmclient +} + +func (m *MMClient) SetLogLevel(level string) { + l, err := log.ParseLevel(level) + if err != nil { + log.SetLevel(log.InfoLevel) + return + } + log.SetLevel(l) +} + +func (m *MMClient) Login() error { + b := &backoff.Backoff{ + Min: time.Second, + Max: 5 * time.Minute, + Jitter: true, + } + uriScheme := "https://" + wsScheme := "wss://" + if m.NoTLS { + uriScheme = "http://" + wsScheme = "ws://" + } + // login to mattermost + m.Client = model.NewClient(uriScheme + m.Credentials.Server) + var myinfo *model.Result + var appErr *model.AppError + var logmsg = "trying login" + for { + m.log.Debugf(logmsg+" %s %s %s", m.Credentials.Team, m.Credentials.Login, m.Credentials.Server) + myinfo, appErr = m.Client.LoginByEmail(m.Credentials.Team, m.Credentials.Login, m.Credentials.Pass) + if appErr != nil { + d := b.Duration() + m.log.Debug(appErr.DetailedError) + if !strings.Contains(appErr.DetailedError, "connection refused") && + !strings.Contains(appErr.DetailedError, "invalid character") { + if appErr.Message == "" { + return errors.New(appErr.DetailedError) + } + return errors.New(appErr.Message) + } + m.log.Debug("LOGIN: %s, reconnecting in %s", appErr, d) + time.Sleep(d) + logmsg = "retrying login" + continue + } + break + } + // reset timer + b.Reset() + m.User = myinfo.Data.(*model.User) + myinfo, _ = m.Client.GetMyTeam("") + m.Team = myinfo.Data.(*model.Team) + + // setup websocket connection + wsurl := wsScheme + m.Credentials.Server + "/api/v1/websocket" + header := http.Header{} + header.Set(model.HEADER_AUTH, "BEARER "+m.Client.AuthToken) + + var WsClient *websocket.Conn + var err error + for { + WsClient, _, err = websocket.DefaultDialer.Dial(wsurl, header) + if err != nil { + d := b.Duration() + log.Printf("WSS: %s, reconnecting in %s", err, d) + time.Sleep(d) + continue + } + break + } + b.Reset() + + m.WsClient = WsClient + + // populating users + m.UpdateUsers() + + // populating channels + m.UpdateChannels() + + return nil +} + +func (m *MMClient) WsReceiver() { + var rmsg model.Message + for { + if err := m.WsClient.ReadJSON(&rmsg); err != nil { + log.Println("error:", err) + // reconnect + 
m.Login() + } + //log.Printf("WsReceiver: %#v", rmsg) + msg := &Message{Raw: &rmsg, Team: m.Credentials.Team} + m.parseMessage(msg) + m.MessageChan <- msg + } + +} + +func (m *MMClient) parseMessage(rmsg *Message) { + switch rmsg.Raw.Action { + case model.ACTION_POSTED: + m.parseActionPost(rmsg) + /* + case model.ACTION_USER_REMOVED: + m.handleWsActionUserRemoved(&rmsg) + case model.ACTION_USER_ADDED: + m.handleWsActionUserAdded(&rmsg) + */ + } +} + +func (m *MMClient) parseActionPost(rmsg *Message) { + data := model.PostFromJson(strings.NewReader(rmsg.Raw.Props["post"])) + // log.Println("receiving userid", data.UserId) + // we don't have the user, refresh the userlist + if m.Users[data.UserId] == nil { + m.UpdateUsers() + } + rmsg.Username = m.Users[data.UserId].Username + rmsg.Channel = m.GetChannelName(data.ChannelId) + // direct message + if strings.Contains(rmsg.Channel, "__") { + //log.Println("direct message") + rcvusers := strings.Split(rmsg.Channel, "__") + if rcvusers[0] != m.User.Id { + rmsg.Channel = m.Users[rcvusers[0]].Username + } else { + rmsg.Channel = m.Users[rcvusers[1]].Username + } + } + rmsg.Text = data.Message + rmsg.Post = data + return +} + +func (m *MMClient) UpdateUsers() error { + mmusers, _ := m.Client.GetProfiles(m.User.TeamId, "") + m.Users = mmusers.Data.(map[string]*model.User) + return nil +} + +func (m *MMClient) UpdateChannels() error { + mmchannels, _ := m.Client.GetChannels("") + m.Channels = mmchannels.Data.(*model.ChannelList) + mmchannels, _ = m.Client.GetMoreChannels("") + m.MoreChannels = mmchannels.Data.(*model.ChannelList) + return nil +} + +func (m *MMClient) GetChannelName(id string) string { + for _, channel := range append(m.Channels.Channels, m.MoreChannels.Channels...) { + if channel.Id == id { + return channel.Name + } + } + // not found? could be a new direct message from mattermost. Try to update and check again + m.UpdateChannels() + for _, channel := range append(m.Channels.Channels, m.MoreChannels.Channels...) { + if channel.Id == id { + return channel.Name + } + } + return "" +} + +func (m *MMClient) GetChannelId(name string) string { + for _, channel := range append(m.Channels.Channels, m.MoreChannels.Channels...) { + if channel.Name == name { + return channel.Id + } + } + return "" +} + +func (m *MMClient) GetChannelHeader(id string) string { + for _, channel := range append(m.Channels.Channels, m.MoreChannels.Channels...) 
{ + if channel.Id == id { + return channel.Header + } + } + return "" +} + +func (m *MMClient) PostMessage(channel string, text string) { + post := &model.Post{ChannelId: m.GetChannelId(channel), Message: text} + m.Client.CreatePost(post) +} + +func (m *MMClient) JoinChannel(channel string) error { + cleanChan := strings.Replace(channel, "#", "", 1) + if m.GetChannelId(cleanChan) == "" { + return errors.New("failed to join") + } + for _, c := range m.Channels.Channels { + if c.Name == cleanChan { + m.log.Debug("Not joining ", cleanChan, " already joined.") + return nil + } + } + m.log.Debug("Joining ", cleanChan) + _, err := m.Client.JoinChannel(m.GetChannelId(cleanChan)) + if err != nil { + return errors.New("failed to join") + } + // m.SyncChannel(m.getMMChannelId(strings.Replace(channel, "#", "", 1)), strings.Replace(channel, "#", "", 1)) + return nil +} + +func (m *MMClient) GetPostsSince(channelId string, time int64) *model.PostList { + res, err := m.Client.GetPostsSince(channelId, time) + if err != nil { + return nil + } + return res.Data.(*model.PostList) +} + +func (m *MMClient) SearchPosts(query string) *model.PostList { + res, err := m.Client.SearchPosts(query) + if err != nil { + return nil + } + return res.Data.(*model.PostList) +} + +func (m *MMClient) GetPosts(channelId string, limit int) *model.PostList { + res, err := m.Client.GetPosts(channelId, 0, limit, "") + if err != nil { + return nil + } + return res.Data.(*model.PostList) +} + +func (m *MMClient) UpdateChannelHeader(channelId string, header string) { + data := make(map[string]string) + data["channel_id"] = channelId + data["channel_header"] = header + log.Printf("updating channelheader %#v, %#v", channelId, header) + _, err := m.Client.UpdateChannelHeader(data) + if err != nil { + log.Print(err) + } +} + +func (m *MMClient) UpdateLastViewed(channelId string) { + log.Printf("posting lastview %#v", channelId) + _, err := m.Client.UpdateLastViewedAt(channelId) + if err != nil { + log.Print(err) + } +} + +func (m *MMClient) UsernamesInChannel(channelName string) []string { + ceiRes, err := m.Client.GetChannelExtraInfo(m.GetChannelId(channelName), 5000, "") + if err != nil { + log.Errorf("UsernamesInChannel(%s) failed: %s", channelName, err) + return []string{} + } + extra := ceiRes.Data.(*model.ChannelExtra) + result := []string{} + for _, member := range extra.Members { + result = append(result, member.Username) + } + return result +} diff --git a/vendor/github.com/42wim/matterbridge/matterhook/LICENSE b/vendor/github.com/42wim/matterbridge/matterhook/LICENSE new file mode 100644 index 00000000..8f71f43f --- /dev/null +++ b/vendor/github.com/42wim/matterbridge/matterhook/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
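For orientation before the vendored file that follows: a minimal usage sketch of the matterhook API as defined below. This is a sketch, not part of the vendored code; the webhook URL and token are placeholders. New starts the outgoing-webhook server on port 9999 unless Config.Port or Config.DisableServer says otherwise.

    package main

    import (
        "log"

        "github.com/42wim/matterbridge/matterhook"
    )

    func main() {
        // Placeholder incoming-webhook URL and outgoing-webhook token.
        c := matterhook.New("https://mattermost.example.com/hooks/abc123",
            matterhook.Config{Token: "outgoingtoken"})
        // Send posts JSON to the incoming-webhook URL above.
        err := c.Send(matterhook.OMessage{Channel: "town-square", UserName: "mybot", Text: "hello"})
        if err != nil {
            log.Println(err)
        }
        // Receive blocks until the built-in server gets a valid outgoing-webhook POST.
        msg := c.Receive()
        log.Printf("%s: %s", msg.UserName, msg.Text)
    }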
+ diff --git a/vendor/github.com/42wim/matterbridge/matterhook/matterhook.go b/vendor/github.com/42wim/matterbridge/matterhook/matterhook.go new file mode 100644 index 00000000..0b023d39 --- /dev/null +++ b/vendor/github.com/42wim/matterbridge/matterhook/matterhook.go @@ -0,0 +1,154 @@ +//Package matterhook provides interaction with mattermost incoming/outgoing webhooks +package matterhook + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "github.com/gorilla/schema" + "io" + "io/ioutil" + "log" + "net/http" + "strconv" +) + +// OMessage for mattermost incoming webhook. (send to mattermost) +type OMessage struct { + Channel string `json:"channel,omitempty"` + IconURL string `json:"icon_url,omitempty"` + IconEmoji string `json:"icon_emoji,omitempty"` + UserName string `json:"username,omitempty"` + Text string `json:"text"` + Attachments interface{} `json:"attachments,omitempty"` + Type string `json:"type,omitempty"` +} + +// IMessage for mattermost outgoing webhook. (received from mattermost) +type IMessage struct { + Token string `schema:"token"` + TeamID string `schema:"team_id"` + TeamDomain string `schema:"team_domain"` + ChannelID string `schema:"channel_id"` + ServiceID string `schema:"service_id"` + ChannelName string `schema:"channel_name"` + Timestamp string `schema:"timestamp"` + UserID string `schema:"user_id"` + UserName string `schema:"user_name"` + Text string `schema:"text"` + TriggerWord string `schema:"trigger_word"` +} + +// Client for Mattermost. +type Client struct { + Url string // URL for incoming webhooks on mattermost. + In chan IMessage + Out chan OMessage + httpclient *http.Client + Config +} + +// Config for client. +type Config struct { + Port int // Port to listen on. + BindAddress string // Address to listen on + Token string // Only allow this token from Mattermost. (Allow everything when empty) + InsecureSkipVerify bool // disable certificate checking + DisableServer bool // Do not start server for outgoing webhooks from Mattermost. +} + +// New Mattermost client. +func New(url string, config Config) *Client { + c := &Client{Url: url, In: make(chan IMessage), Out: make(chan OMessage), Config: config} + if c.Port == 0 { + c.Port = 9999 + } + c.BindAddress += ":" + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: config.InsecureSkipVerify}, + } + c.httpclient = &http.Client{Transport: tr} + if !c.DisableServer { + go c.StartServer() + } + return c +} + +// StartServer starts a webserver listening for incoming mattermost POSTS. +func (c *Client) StartServer() { + mux := http.NewServeMux() + mux.Handle("/", c) + log.Printf("Listening on http://%v:%v...\n", c.BindAddress, c.Port) + if err := http.ListenAndServe((c.BindAddress + strconv.Itoa(c.Port)), mux); err != nil { + log.Fatal(err) + } +} + +// ServeHTTP implementation. 
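+// Only POST requests are accepted: the form body is decoded into an
+// IMessage, the message token is checked against c.Token when one is
+// configured, and accepted messages are delivered on the c.In channel.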
+func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + log.Println("invalid " + r.Method + " connection from " + r.RemoteAddr) + http.NotFound(w, r) + return + } + msg := IMessage{} + err := r.ParseForm() + if err != nil { + log.Println(err) + http.NotFound(w, r) + return + } + defer r.Body.Close() + decoder := schema.NewDecoder() + err = decoder.Decode(&msg, r.PostForm) + if err != nil { + log.Println(err) + http.NotFound(w, r) + return + } + if msg.Token == "" { + log.Println("no token from " + r.RemoteAddr) + http.NotFound(w, r) + return + } + if c.Token != "" { + if msg.Token != c.Token { + log.Println("invalid token " + msg.Token + " from " + r.RemoteAddr) + http.NotFound(w, r) + return + } + } + c.In <- msg +} + +// Receive returns an incoming message from mattermost outgoing webhooks URL. +func (c *Client) Receive() IMessage { + for { + select { + case msg := <-c.In: + return msg + } + } +} + +// Send sends a msg to mattermost incoming webhooks URL. +func (c *Client) Send(msg OMessage) error { + buf, err := json.Marshal(msg) + if err != nil { + return err + } + resp, err := c.httpclient.Post(c.Url, "application/json", bytes.NewReader(buf)) + if err != nil { + return err + } + defer resp.Body.Close() + + // Read entire body to completion to re-use keep-alive connections. + io.Copy(ioutil.Discard, resp.Body) + + if resp.StatusCode != 200 { + return fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + return nil +} diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE new file mode 100644 index 00000000..f090cb42 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go new file mode 100644 index 00000000..dddd5f87 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger. 
+ + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/Sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/Sirupsen/logrus +*/ +package logrus diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go new file mode 100644 index 00000000..89e966e7 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/entry.go @@ -0,0 +1,264 @@ +package logrus + +import ( + "bytes" + "fmt" + "io" + "os" + "time" +) + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Debug, Info, +// Warn, Error, Fatal or Panic is called on it. These objects can be reused and +// passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + Level Level + + // Message passed to Debug, Info, Warn, Error, Fatal or Panic + Message string +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, give a little extra room + Data: make(Fields, 5), + } +} + +// Returns a reader for the entry, which is a proxy to the formatter. +func (entry *Entry) Reader() (*bytes.Buffer, error) { + serialized, err := entry.Logger.Formatter.Format(entry) + return bytes.NewBuffer(serialized), err +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + reader, err := entry.Reader() + if err != nil { + return "", err + } + + return reader.String(), err +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. 
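+// The existing entry.Data is copied into a fresh map together with the new
+// fields, so the receiving Entry is never mutated and can be reused.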
+func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + for k, v := range fields { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data} +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { + entry.Time = time.Now() + entry.Level = level + entry.Message = msg + + if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + entry.Logger.mu.Unlock() + } + + reader, err := entry.Reader() + if err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + entry.Logger.mu.Unlock() + } + + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + + _, err = io.Copy(entry.Logger.Out, reader) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(&entry) + } +} + +func (entry *Entry) Debug(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.log(DebugLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.log(InfoLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warn(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.log(WarnLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.log(ErrorLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Fatal(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.log(FatalLevel, fmt.Sprint(args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.log(PanicLevel, fmt.Sprint(args...)) + } + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Debugf(format string, args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) 
+} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(fmt.Sprintf(format, args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(fmt.Sprintf(format, args...)) + } +} + +// Entry Println family functions + +func (entry *Entry) Debugln(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Infoln(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Fatalln(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(entry.sprintlnn(args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(entry.sprintlnn(args...)) + } +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) 
+ return msg[:len(msg)-1] +} diff --git a/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go b/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go new file mode 100644 index 00000000..a1623ec0 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go @@ -0,0 +1,50 @@ +package main + +import ( + "github.com/Sirupsen/logrus" +) + +var log = logrus.New() + +func init() { + log.Formatter = new(logrus.JSONFormatter) + log.Formatter = new(logrus.TextFormatter) // default + log.Level = logrus.DebugLevel +} + +func main() { + defer func() { + err := recover() + if err != nil { + log.WithFields(logrus.Fields{ + "omg": true, + "err": err, + "number": 100, + }).Fatal("The ice breaks!") + } + }() + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "number": 8, + }).Debug("Started observing beach") + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "temperature": -4, + }).Debug("Temperature changes") + + log.WithFields(logrus.Fields{ + "animal": "orca", + "size": 9009, + }).Panic("It's over 9000!") +} diff --git a/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go b/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go new file mode 100644 index 00000000..3187f6d3 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go @@ -0,0 +1,30 @@ +package main + +import ( + "github.com/Sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" +) + +var log = logrus.New() + +func init() { + log.Formatter = new(logrus.TextFormatter) // default + log.Hooks.Add(airbrake.NewHook(123, "xyz", "development")) +} + +func main() { + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") +} diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go new file mode 100644 index 00000000..9a0120ac --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/exported.go @@ -0,0 +1,193 @@ +package logrus + +import ( + "io" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.mu.Lock() + defer std.mu.Unlock() + std.Out = out +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.mu.Lock() + defer std.mu.Unlock() + std.Formatter = formatter +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.mu.Lock() + defer std.mu.Unlock() + std.Level = level +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + std.mu.Lock() + defer std.mu.Unlock() + return std.Level +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.mu.Lock() + defer std.mu.Unlock() + std.Hooks.Add(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. 
+func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) 
+} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go new file mode 100644 index 00000000..104d689f --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/formatter.go @@ -0,0 +1,48 @@ +package logrus + +import "time" + +const DefaultTimestampFormat = time.RFC3339 + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. +func prefixFieldClashes(data Fields) { + _, ok := data["time"] + if ok { + data["fields.time"] = data["time"] + } + + _, ok = data["msg"] + if ok { + data["fields.msg"] = data["msg"] + } + + _, ok = data["level"] + if ok { + data["fields.level"] = data["level"] + } +} diff --git a/vendor/github.com/Sirupsen/logrus/formatters/logstash/logstash.go b/vendor/github.com/Sirupsen/logrus/formatters/logstash/logstash.go new file mode 100644 index 00000000..aad646ab --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/formatters/logstash/logstash.go @@ -0,0 +1,61 @@ +package logstash + +import ( + "encoding/json" + "fmt" + + "github.com/Sirupsen/logrus" +) + +// Formatter generates json in logstash format. +// Logstash site: http://logstash.net/ +type LogstashFormatter struct { + Type string // if not empty use for logstash type field. + + // TimestampFormat sets the format used for timestamps. 
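+	// An empty value falls back to logrus.DefaultTimestampFormat (time.RFC3339).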
+ TimestampFormat string +} + +func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) { + fields := make(logrus.Fields) + for k, v := range entry.Data { + fields[k] = v + } + + fields["@version"] = 1 + + if f.TimestampFormat == "" { + f.TimestampFormat = logrus.DefaultTimestampFormat + } + + fields["@timestamp"] = entry.Time.Format(f.TimestampFormat) + + // set message field + v, ok := entry.Data["message"] + if ok { + fields["fields.message"] = v + } + fields["message"] = entry.Message + + // set level field + v, ok = entry.Data["level"] + if ok { + fields["fields.level"] = v + } + fields["level"] = entry.Level.String() + + // set type field + if f.Type != "" { + v, ok = entry.Data["type"] + if ok { + fields["fields.type"] = v + } + fields["type"] = f.Type + } + + serialized, err := json.Marshal(fields) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go new file mode 100644 index 00000000..3f151cdc --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go new file mode 100644 index 00000000..a36e2003 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go @@ -0,0 +1,54 @@ +// +build !windows,!nacl,!plan9 + +package logrus_syslog + +import ( + "fmt" + "github.com/Sirupsen/logrus" + "log/syslog" + "os" +) + +// SyslogHook to send logs via syslog. +type SyslogHook struct { + Writer *syslog.Writer + SyslogNetwork string + SyslogRaddr string +} + +// Creates a hook to be added to an instance of logger. 
This is called with +// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")` +// `if err == nil { log.Hooks.Add(hook) }` +func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) { + w, err := syslog.Dial(network, raddr, priority, tag) + return &SyslogHook{w, network, raddr}, err +} + +func (hook *SyslogHook) Fire(entry *logrus.Entry) error { + line, err := entry.String() + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) + return err + } + + switch entry.Level { + case logrus.PanicLevel: + return hook.Writer.Crit(line) + case logrus.FatalLevel: + return hook.Writer.Crit(line) + case logrus.ErrorLevel: + return hook.Writer.Err(line) + case logrus.WarnLevel: + return hook.Writer.Warning(line) + case logrus.InfoLevel: + return hook.Writer.Info(line) + case logrus.DebugLevel: + return hook.Writer.Debug(line) + default: + return nil + } +} + +func (hook *SyslogHook) Levels() []logrus.Level { + return logrus.AllLevels +} diff --git a/vendor/github.com/Sirupsen/logrus/hooks/test/test.go b/vendor/github.com/Sirupsen/logrus/hooks/test/test.go new file mode 100644 index 00000000..06881253 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks/test/test.go @@ -0,0 +1,67 @@ +package test + +import ( + "io/ioutil" + + "github.com/Sirupsen/logrus" +) + +// test.Hook is a hook designed for dealing with logs in test scenarios. +type Hook struct { + Entries []*logrus.Entry +} + +// Installs a test hook for the global logger. +func NewGlobal() *Hook { + + hook := new(Hook) + logrus.AddHook(hook) + + return hook + +} + +// Installs a test hook for a given local logger. +func NewLocal(logger *logrus.Logger) *Hook { + + hook := new(Hook) + logger.Hooks.Add(hook) + + return hook + +} + +// Creates a discarding logger and installs the test hook. +func NewNullLogger() (*logrus.Logger, *Hook) { + + logger := logrus.New() + logger.Out = ioutil.Discard + + return logger, NewLocal(logger) + +} + +func (t *Hook) Fire(e *logrus.Entry) error { + t.Entries = append(t.Entries, e) + return nil +} + +func (t *Hook) Levels() []logrus.Level { + return logrus.AllLevels +} + +// LastEntry returns the last entry that was logged or nil. +func (t *Hook) LastEntry() (l *logrus.Entry) { + + if i := len(t.Entries) - 1; i < 0 { + return nil + } else { + return t.Entries[i] + } + +} + +// Reset removes all Entries from this test hook. +func (t *Hook) Reset() { + t.Entries = make([]*logrus.Entry, 0) +} diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go new file mode 100644 index 00000000..2ad6dc5c --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go @@ -0,0 +1,41 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. 
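+	// An empty value falls back to DefaultTimestampFormat (time.RFC3339).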
+ TimestampFormat string +} + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+3) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/Sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + prefixFieldClashes(data) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + + data["time"] = entry.Time.Format(timestampFormat) + data["msg"] = entry.Message + data["level"] = entry.Level.String() + + serialized, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go new file mode 100644 index 00000000..2fdb2317 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logger.go @@ -0,0 +1,212 @@ +package logrus + +import ( + "io" + "os" + "sync" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventorous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. `logrus.Debug` is useful in + Level Level + // Used to sync writing to the log. + mu sync.Mutex +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &Logger{ +// Out: os.Stderr, +// Formatter: new(JSONFormatter), +// Hooks: make(LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + } +} + +// Adds a field to the log entry, note that you it doesn't log until you call +// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// If you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + return NewEntry(logger).WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + return NewEntry(logger).WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. 
+func (logger *Logger) WithError(err error) *Entry { + return NewEntry(logger).WithError(err) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debugf(format, args...) + } +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Infof(format, args...) + } +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + NewEntry(logger).Printf(format, args...) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnf(format, args...) + } +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnf(format, args...) + } +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Errorf(format, args...) + } +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatalf(format, args...) + } + os.Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panicf(format, args...) + } +} + +func (logger *Logger) Debug(args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debug(args...) + } +} + +func (logger *Logger) Info(args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Info(args...) + } +} + +func (logger *Logger) Print(args ...interface{}) { + NewEntry(logger).Info(args...) +} + +func (logger *Logger) Warn(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warn(args...) + } +} + +func (logger *Logger) Warning(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warn(args...) + } +} + +func (logger *Logger) Error(args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Error(args...) + } +} + +func (logger *Logger) Fatal(args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatal(args...) + } + os.Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panic(args...) + } +} + +func (logger *Logger) Debugln(args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debugln(args...) + } +} + +func (logger *Logger) Infoln(args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Infoln(args...) + } +} + +func (logger *Logger) Println(args ...interface{}) { + NewEntry(logger).Println(args...) +} + +func (logger *Logger) Warnln(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnln(args...) + } +} + +func (logger *Logger) Warningln(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnln(args...) + } +} + +func (logger *Logger) Errorln(args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Errorln(args...) + } +} + +func (logger *Logger) Fatalln(args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatalln(args...) + } + os.Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panicln(args...) 
+ } +} diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go new file mode 100644 index 00000000..e5966911 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logrus.go @@ -0,0 +1,143 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint8 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + switch level { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warning" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + } + + return "unknown" +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. 
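+//
+// An illustrative sketch of the intended use (Announce is a hypothetical
+// helper, not part of this package):
+//
+//	func Announce(l StdLogger, msg string) {
+//		l.Println(msg) // works with *log.Logger, *Logger and *Entry alike
+//	}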
+type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} + +// The FieldLogger interface generalizes the Entry and Logger types +type FieldLogger interface { + WithField(key string, value interface{}) *Entry + WithFields(fields Fields) *Entry + WithError(err error) *Entry + + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Printf(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Panicf(format string, args ...interface{}) + + Debug(args ...interface{}) + Info(args ...interface{}) + Print(args ...interface{}) + Warn(args ...interface{}) + Warning(args ...interface{}) + Error(args ...interface{}) + Fatal(args ...interface{}) + Panic(args ...interface{}) + + Debugln(args ...interface{}) + Infoln(args ...interface{}) + Println(args ...interface{}) + Warnln(args ...interface{}) + Warningln(args ...interface{}) + Errorln(args ...interface{}) + Fatalln(args ...interface{}) + Panicln(args ...interface{}) +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go new file mode 100644 index 00000000..71f8d67a --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go @@ -0,0 +1,9 @@ +// +build darwin freebsd openbsd netbsd dragonfly + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go new file mode 100644 index 00000000..a2c0b40d --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_linux.go @@ -0,0 +1,12 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TCGETS + +type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 00000000..b343b3a3 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,21 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd netbsd dragonfly + +package logrus + +import ( + "syscall" + "unsafe" +) + +// IsTerminal returns true if stderr's file descriptor is a terminal. 
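+//
+// Illustrative use, e.g. inside a formatter deciding whether to colorize:
+//
+//	if IsTerminal() {
+//		// stderr is a TTY, so ANSI color escapes are safe to emit
+//	}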
+func IsTerminal() bool {
+	fd := syscall.Stderr
+	var termios Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
new file mode 100644
index 00000000..3e70bf7b
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
@@ -0,0 +1,15 @@
+// +build solaris
+
+package logrus
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if stdout's file descriptor is a terminal.
+func IsTerminal() bool {
+	_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
+	return err == nil
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 00000000..0146845d
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package logrus
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	fd := syscall.Stderr
+	var st uint32
+	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+	return r != 0 && e == 0
+}
diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 00000000..06ef2023
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,161 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"runtime"
+	"sort"
+	"strings"
+	"time"
+)
+
+const (
+	nocolor = 0
+	red     = 31
+	green   = 32
+	yellow  = 33
+	blue    = 34
+	gray    = 37
+)
+
+var (
+	baseTimestamp time.Time
+	isTerminal    bool
+)
+
+func init() {
+	baseTimestamp = time.Now()
+	isTerminal = IsTerminal()
+}
+
+func miniTS() int {
+	return int(time.Since(baseTimestamp) / time.Second)
+}
+
+type TextFormatter struct {
+	// Set to true to bypass checking for a TTY before outputting colors.
+	ForceColors bool
+
+	// Force disabling colors.
+	DisableColors bool
+
+	// Disable timestamp logging. Useful when output is redirected to a logging
+	// system that already adds timestamps.
+	DisableTimestamp bool
+
+	// Enable logging the full timestamp when a TTY is attached instead of just
+	// the time passed since beginning of execution.
+	FullTimestamp bool
+
+	// TimestampFormat to use for display when a full timestamp is printed.
+	TimestampFormat string
+
+	// The fields are sorted by default for a consistent output. For applications
+	// that log extremely frequently and don't use the JSON formatter this may not
+	// be desired.
+ DisableSorting bool +} + +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + var keys []string = make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + + if !f.DisableSorting { + sort.Strings(keys) + } + + b := &bytes.Buffer{} + + prefixFieldClashes(entry.Data) + + isColorTerminal := isTerminal && (runtime.GOOS != "windows") + isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + if isColored { + f.printColored(b, entry, keys, timestampFormat) + } else { + if !f.DisableTimestamp { + f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) + } + f.appendKeyValue(b, "level", entry.Level.String()) + if entry.Message != "" { + f.appendKeyValue(b, "msg", entry.Message) + } + for _, key := range keys { + f.appendKeyValue(b, key, entry.Data[key]) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String())[0:4] + + if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + } + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v) + } +} + +func needsQuoting(text string) bool { + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.') { + return false + } + } + return true +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + + b.WriteString(key) + b.WriteByte('=') + + switch value := value.(type) { + case string: + if needsQuoting(value) { + b.WriteString(value) + } else { + fmt.Fprintf(b, "%q", value) + } + case error: + errmsg := value.Error() + if needsQuoting(errmsg) { + b.WriteString(errmsg) + } else { + fmt.Fprintf(b, "%q", value) + } + default: + fmt.Fprint(b, value) + } + + b.WriteByte(' ') +} diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go new file mode 100644 index 00000000..1e30b1c7 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/writer.go @@ -0,0 +1,31 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +func (logger *Logger) Writer() *io.PipeWriter { + reader, writer := io.Pipe() + + go logger.writerScanner(reader) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (logger *Logger) writerScanner(reader *io.PipeReader) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + logger.Print(scanner.Text()) + } + if err := scanner.Err(); err != nil { + logger.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/github.com/alecthomas/log4go/LICENSE b/vendor/github.com/alecthomas/log4go/LICENSE new file mode 100644 index 00000000..7093402b --- /dev/null +++ b/vendor/github.com/alecthomas/log4go/LICENSE @@ -0,0 +1,13 @@ +Copyright 
(c) 2010, Kyle Lemons . All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/alecthomas/log4go/config.go b/vendor/github.com/alecthomas/log4go/config.go
new file mode 100644
index 00000000..577c3eb2
--- /dev/null
+++ b/vendor/github.com/alecthomas/log4go/config.go
@@ -0,0 +1,288 @@
+// Copyright (C) 2010, Kyle Lemons . All rights reserved.
+
+package log4go
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strconv"
+	"strings"
+)
+
+type xmlProperty struct {
+	Name  string `xml:"name,attr"`
+	Value string `xml:",chardata"`
+}
+
+type xmlFilter struct {
+	Enabled  string        `xml:"enabled,attr"`
+	Tag      string        `xml:"tag"`
+	Level    string        `xml:"level"`
+	Type     string        `xml:"type"`
+	Property []xmlProperty `xml:"property"`
+}
+
+type xmlLoggerConfig struct {
+	Filter []xmlFilter `xml:"filter"`
+}
+
+// Load XML configuration; see examples/example.xml for documentation
+func (log Logger) LoadConfiguration(filename string) {
+	log.Close()
+
+	// Open the configuration file
+	fd, err := os.Open(filename)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not open %q for reading: %s\n", filename, err)
+		os.Exit(1)
+	}
+
+	contents, err := ioutil.ReadAll(fd)
+	fd.Close() // the descriptor is no longer needed once the contents are in memory
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not read %q: %s\n", filename, err)
+		os.Exit(1)
+	}
+
+	xc := new(xmlLoggerConfig)
+	if err := xml.Unmarshal(contents, xc); err != nil {
+		fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not parse XML configuration in %q: %s\n", filename, err)
+		os.Exit(1)
+	}
+
+	for _, xmlfilt := range xc.Filter {
+		var filt LogWriter
+		var lvl Level
+		bad, good, enabled := false, true, false
+
+		// Check required children
+		if len(xmlfilt.Enabled) == 0 {
+			fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required attribute %s for filter missing in %s\n", "enabled", filename)
+			bad = true
+		} else {
+			enabled = xmlfilt.Enabled != "false"
+		}
+		if len(xmlfilt.Tag) == 0 {
+			fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter missing in %s\n", "tag", filename)
+			bad = true
+		}
+		if len(xmlfilt.Type) == 0 {
+			fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter missing in %s\n", "type", filename)
+			bad = true
+		}
+		if len(xmlfilt.Level) == 0 {
+			fmt.Fprintf(os.Stderr,
"LoadConfiguration: Error: Required child <%s> for filter missing in %s\n", "level", filename) + bad = true + } + + switch xmlfilt.Level { + case "FINEST": + lvl = FINEST + case "FINE": + lvl = FINE + case "DEBUG": + lvl = DEBUG + case "TRACE": + lvl = TRACE + case "INFO": + lvl = INFO + case "WARNING": + lvl = WARNING + case "ERROR": + lvl = ERROR + case "CRITICAL": + lvl = CRITICAL + default: + fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter has unknown value in %s: %s\n", "level", filename, xmlfilt.Level) + bad = true + } + + // Just so all of the required attributes are errored at the same time if missing + if bad { + os.Exit(1) + } + + switch xmlfilt.Type { + case "console": + filt, good = xmlToConsoleLogWriter(filename, xmlfilt.Property, enabled) + case "file": + filt, good = xmlToFileLogWriter(filename, xmlfilt.Property, enabled) + case "xml": + filt, good = xmlToXMLLogWriter(filename, xmlfilt.Property, enabled) + case "socket": + filt, good = xmlToSocketLogWriter(filename, xmlfilt.Property, enabled) + default: + fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not load XML configuration in %s: unknown filter type \"%s\"\n", filename, xmlfilt.Type) + os.Exit(1) + } + + // Just so all of the required params are errored at the same time if wrong + if !good { + os.Exit(1) + } + + // If we're disabled (syntax and correctness checks only), don't add to logger + if !enabled { + continue + } + + log[xmlfilt.Tag] = &Filter{lvl, filt} + } +} + +func xmlToConsoleLogWriter(filename string, props []xmlProperty, enabled bool) (*ConsoleLogWriter, bool) { + // Parse properties + for _, prop := range props { + switch prop.Name { + default: + fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for console filter in %s\n", prop.Name, filename) + } + } + + // If it's disabled, we're just checking syntax + if !enabled { + return nil, true + } + + return NewConsoleLogWriter(), true +} + +// Parse a number with K/M/G suffixes based on thousands (1000) or 2^10 (1024) +func strToNumSuffix(str string, mult int) int { + num := 1 + if len(str) > 1 { + switch str[len(str)-1] { + case 'G', 'g': + num *= mult + fallthrough + case 'M', 'm': + num *= mult + fallthrough + case 'K', 'k': + num *= mult + str = str[0 : len(str)-1] + } + } + parsed, _ := strconv.Atoi(str) + return parsed * num +} +func xmlToFileLogWriter(filename string, props []xmlProperty, enabled bool) (*FileLogWriter, bool) { + file := "" + format := "[%D %T] [%L] (%S) %M" + maxlines := 0 + maxsize := 0 + daily := false + rotate := false + + // Parse properties + for _, prop := range props { + switch prop.Name { + case "filename": + file = strings.Trim(prop.Value, " \r\n") + case "format": + format = strings.Trim(prop.Value, " \r\n") + case "maxlines": + maxlines = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1000) + case "maxsize": + maxsize = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1024) + case "daily": + daily = strings.Trim(prop.Value, " \r\n") != "false" + case "rotate": + rotate = strings.Trim(prop.Value, " \r\n") != "false" + default: + fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for file filter in %s\n", prop.Name, filename) + } + } + + // Check properties + if len(file) == 0 { + fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required property \"%s\" for file filter missing in %s\n", "filename", filename) + return nil, false + } + + // If it's disabled, we're just checking syntax + if !enabled { + return nil, true + } + + flw := 
NewFileLogWriter(file, rotate)
+	flw.SetFormat(format)
+	flw.SetRotateLines(maxlines)
+	flw.SetRotateSize(maxsize)
+	flw.SetRotateDaily(daily)
+	return flw, true
+}
+
+func xmlToXMLLogWriter(filename string, props []xmlProperty, enabled bool) (*FileLogWriter, bool) {
+	file := ""
+	maxrecords := 0
+	maxsize := 0
+	daily := false
+	rotate := false
+
+	// Parse properties
+	for _, prop := range props {
+		switch prop.Name {
+		case "filename":
+			file = strings.Trim(prop.Value, " \r\n")
+		case "maxrecords":
+			maxrecords = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1000)
+		case "maxsize":
+			maxsize = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1024)
+		case "daily":
+			daily = strings.Trim(prop.Value, " \r\n") != "false"
+		case "rotate":
+			rotate = strings.Trim(prop.Value, " \r\n") != "false"
+		default:
+			fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for xml filter in %s\n", prop.Name, filename)
+		}
+	}
+
+	// Check properties
+	if len(file) == 0 {
+		fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required property \"%s\" for xml filter missing in %s\n", "filename", filename)
+		return nil, false
+	}
+
+	// If it's disabled, we're just checking syntax
+	if !enabled {
+		return nil, true
+	}
+
+	xlw := NewXMLLogWriter(file, rotate)
+	xlw.SetRotateLines(maxrecords)
+	xlw.SetRotateSize(maxsize)
+	xlw.SetRotateDaily(daily)
+	return xlw, true
+}
+
+func xmlToSocketLogWriter(filename string, props []xmlProperty, enabled bool) (SocketLogWriter, bool) {
+	endpoint := ""
+	protocol := "udp"
+
+	// Parse properties
+	for _, prop := range props {
+		switch prop.Name {
+		case "endpoint":
+			endpoint = strings.Trim(prop.Value, " \r\n")
+		case "protocol":
+			protocol = strings.Trim(prop.Value, " \r\n")
+		default:
+			fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for socket filter in %s\n", prop.Name, filename)
+		}
+	}
+
+	// Check properties
+	if len(endpoint) == 0 {
+		fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required property \"%s\" for socket filter missing in %s\n", "endpoint", filename)
+		return nil, false
+	}
+
+	// If it's disabled, we're just checking syntax
+	if !enabled {
+		return nil, true
+	}
+
+	return NewSocketLogWriter(protocol, endpoint), true
+}
diff --git a/vendor/github.com/alecthomas/log4go/examples/ConsoleLogWriter_Manual.go b/vendor/github.com/alecthomas/log4go/examples/ConsoleLogWriter_Manual.go
new file mode 100644
index 00000000..698dd332
--- /dev/null
+++ b/vendor/github.com/alecthomas/log4go/examples/ConsoleLogWriter_Manual.go
@@ -0,0 +1,14 @@
+package main
+
+import (
+	"time"
+)
+
+import l4g "code.google.com/p/log4go"
+
+func main() {
+	log := l4g.NewLogger()
+	defer log.Close()
+	log.AddFilter("stdout", l4g.DEBUG, l4g.NewConsoleLogWriter())
+	log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02"))
+}
diff --git a/vendor/github.com/alecthomas/log4go/examples/FileLogWriter_Manual.go b/vendor/github.com/alecthomas/log4go/examples/FileLogWriter_Manual.go
new file mode 100644
index 00000000..efd596aa
--- /dev/null
+++ b/vendor/github.com/alecthomas/log4go/examples/FileLogWriter_Manual.go
@@ -0,0 +1,57 @@
+package main
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"time"
+)
+
+import l4g "code.google.com/p/log4go"
+
+const (
+	filename = "flw.log"
+)
+
+func main() {
+	// Get a new logger instance
+	log := l4g.NewLogger()
+
+	// Create a default logger that is logging messages of FINE or higher
+	log.AddFilter("file", l4g.FINE, l4g.NewFileLogWriter(filename, false))
+	log.Close()
+
+	/* Can
also specify manually via the following: (these are the defaults) */
+	flw := l4g.NewFileLogWriter(filename, false)
+	flw.SetFormat("[%D %T] [%L] (%S) %M")
+	flw.SetRotate(false)
+	flw.SetRotateSize(0)
+	flw.SetRotateLines(0)
+	flw.SetRotateDaily(false)
+	log.AddFilter("file", l4g.FINE, flw)
+
+	// Log some experimental messages
+	log.Finest("Everything is created now (notice that I will not be printing to the file)")
+	log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02"))
+	log.Critical("Time to close out!")
+
+	// Close the log
+	log.Close()
+
+	// Print what was logged to the file (yes, I know I'm skipping error checking)
+	fd, _ := os.Open(filename)
+	in := bufio.NewReader(fd)
+	fmt.Print("Messages logged to file were: (line numbers not included)\n")
+	for lineno := 1; ; lineno++ {
+		line, err := in.ReadString('\n')
+		if err == io.EOF {
+			break
+		}
+		fmt.Printf("%3d:\t%s", lineno, line)
+	}
+	fd.Close()
+
+	// Remove the file so it's not lying around
+	os.Remove(filename)
+}
diff --git a/vendor/github.com/alecthomas/log4go/examples/SimpleNetLogServer.go b/vendor/github.com/alecthomas/log4go/examples/SimpleNetLogServer.go
new file mode 100644
index 00000000..83c80ad1
--- /dev/null
+++ b/vendor/github.com/alecthomas/log4go/examples/SimpleNetLogServer.go
@@ -0,0 +1,42 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"net"
+	"os"
+)
+
+var (
+	port = flag.String("p", "12124", "Port number to listen on")
+)
+
+func e(err error) {
+	if err != nil {
+		fmt.Printf("Erroring out: %s\n", err)
+		os.Exit(1)
+	}
+}
+
+func main() {
+	flag.Parse()
+
+	// Bind to the port (ResolveUDPAddr takes the network first, then the address)
+	bind, err := net.ResolveUDPAddr("udp", "0.0.0.0:"+*port)
+	e(err)
+
+	// Create listener
+	listener, err := net.ListenUDP("udp", bind)
+	e(err)
+
+	fmt.Printf("Listening to port %s...\n", *port)
+	for {
+		// read into a new buffer
+		buffer := make([]byte, 1024)
+		_, _, err := listener.ReadFrom(buffer)
+		e(err)
+
+		// log to standard output
+		fmt.Println(string(buffer))
+	}
+}
diff --git a/vendor/github.com/alecthomas/log4go/examples/SocketLogWriter_Manual.go b/vendor/github.com/alecthomas/log4go/examples/SocketLogWriter_Manual.go
new file mode 100644
index 00000000..400b698c
--- /dev/null
+++ b/vendor/github.com/alecthomas/log4go/examples/SocketLogWriter_Manual.go
@@ -0,0 +1,18 @@
+package main
+
+import (
+	"time"
+)
+
+import l4g "code.google.com/p/log4go"
+
+func main() {
+	log := l4g.NewLogger()
+	log.AddFilter("network", l4g.FINEST, l4g.NewSocketLogWriter("udp", "192.168.1.255:12124"))
+
+	// Run `nc -u -l -p 12124` or similar before you run this to see the following message
+	log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02"))
+
+	// This makes sure the output stream buffer is written
+	log.Close()
+}
diff --git a/vendor/github.com/alecthomas/log4go/examples/XMLConfigurationExample.go b/vendor/github.com/alecthomas/log4go/examples/XMLConfigurationExample.go
new file mode 100644
index 00000000..164c2add
--- /dev/null
+++ b/vendor/github.com/alecthomas/log4go/examples/XMLConfigurationExample.go
@@ -0,0 +1,13 @@
+package main
+
+import l4g "code.google.com/p/log4go"
+
+func main() {
+	// Load the configuration (isn't this easy?)
+	l4g.LoadConfiguration("example.xml")
+
+	// And now we're ready!
+	l4g.Finest("This will only go to those of you really cool UDP kids! If you change enabled=true.")
+	l4g.Debug("Oh no! 
%d + %d = %d!", 2, 2, 2+2) + l4g.Info("About that time, eh chaps?") +} diff --git a/vendor/github.com/alecthomas/log4go/filelog.go b/vendor/github.com/alecthomas/log4go/filelog.go new file mode 100644 index 00000000..ee0ab0c0 --- /dev/null +++ b/vendor/github.com/alecthomas/log4go/filelog.go @@ -0,0 +1,264 @@ +// Copyright (C) 2010, Kyle Lemons . All rights reserved. + +package log4go + +import ( + "fmt" + "os" + "time" +) + +// This log writer sends output to a file +type FileLogWriter struct { + rec chan *LogRecord + rot chan bool + + // The opened file + filename string + file *os.File + + // The logging format + format string + + // File header/trailer + header, trailer string + + // Rotate at linecount + maxlines int + maxlines_curlines int + + // Rotate at size + maxsize int + maxsize_cursize int + + // Rotate daily + daily bool + daily_opendate int + + // Keep old logfiles (.001, .002, etc) + rotate bool + maxbackup int +} + +// This is the FileLogWriter's output method +func (w *FileLogWriter) LogWrite(rec *LogRecord) { + w.rec <- rec +} + +func (w *FileLogWriter) Close() { + close(w.rec) + w.file.Sync() +} + +// NewFileLogWriter creates a new LogWriter which writes to the given file and +// has rotation enabled if rotate is true. +// +// If rotate is true, any time a new log file is opened, the old one is renamed +// with a .### extension to preserve it. The various Set* methods can be used +// to configure log rotation based on lines, size, and daily. +// +// The standard log-line format is: +// [%D %T] [%L] (%S) %M +func NewFileLogWriter(fname string, rotate bool) *FileLogWriter { + w := &FileLogWriter{ + rec: make(chan *LogRecord, LogBufferLength), + rot: make(chan bool), + filename: fname, + format: "[%D %T] [%L] (%S) %M", + rotate: rotate, + maxbackup: 999, + } + + // open the file for the first time + if err := w.intRotate(); err != nil { + fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err) + return nil + } + + go func() { + defer func() { + if w.file != nil { + fmt.Fprint(w.file, FormatLogRecord(w.trailer, &LogRecord{Created: time.Now()})) + w.file.Close() + } + }() + + for { + select { + case <-w.rot: + if err := w.intRotate(); err != nil { + fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err) + return + } + case rec, ok := <-w.rec: + if !ok { + return + } + now := time.Now() + if (w.maxlines > 0 && w.maxlines_curlines >= w.maxlines) || + (w.maxsize > 0 && w.maxsize_cursize >= w.maxsize) || + (w.daily && now.Day() != w.daily_opendate) { + if err := w.intRotate(); err != nil { + fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err) + return + } + } + + // Perform the write + n, err := fmt.Fprint(w.file, FormatLogRecord(w.format, rec)) + if err != nil { + fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err) + return + } + + // Update the counts + w.maxlines_curlines++ + w.maxsize_cursize += n + } + } + }() + + return w +} + +// Request that the logs rotate +func (w *FileLogWriter) Rotate() { + w.rot <- true +} + +// If this is called in a threaded context, it MUST be synchronized +func (w *FileLogWriter) intRotate() error { + // Close any log file that may be open + if w.file != nil { + fmt.Fprint(w.file, FormatLogRecord(w.trailer, &LogRecord{Created: time.Now()})) + w.file.Close() + } + + // If we are keeping log files, move it to the next available number + if w.rotate { + _, err := os.Lstat(w.filename) + if err == nil { // file exists + // Find the next available number + num := 1 + fname := "" + if w.daily && 
time.Now().Day() != w.daily_opendate { + yesterday := time.Now().AddDate(0, 0, -1).Format("2006-01-02") + + for ; err == nil && num <= 999; num++ { + fname = w.filename + fmt.Sprintf(".%s.%03d", yesterday, num) + _, err = os.Lstat(fname) + } + // return error if the last file checked still existed + if err == nil { + return fmt.Errorf("Rotate: Cannot find free log number to rename %s\n", w.filename) + } + } else { + num = w.maxbackup - 1 + for ; num >= 1; num-- { + fname = w.filename + fmt.Sprintf(".%d", num) + nfname := w.filename + fmt.Sprintf(".%d", num+1) + _, err = os.Lstat(fname) + if err == nil { + os.Rename(fname, nfname) + } + } + } + + w.file.Close() + // Rename the file to its newfound home + err = os.Rename(w.filename, fname) + if err != nil { + return fmt.Errorf("Rotate: %s\n", err) + } + } + } + + // Open the log file + fd, err := os.OpenFile(w.filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0660) + if err != nil { + return err + } + w.file = fd + + now := time.Now() + fmt.Fprint(w.file, FormatLogRecord(w.header, &LogRecord{Created: now})) + + // Set the daily open date to the current date + w.daily_opendate = now.Day() + + // initialize rotation values + w.maxlines_curlines = 0 + w.maxsize_cursize = 0 + + return nil +} + +// Set the logging format (chainable). Must be called before the first log +// message is written. +func (w *FileLogWriter) SetFormat(format string) *FileLogWriter { + w.format = format + return w +} + +// Set the logfile header and footer (chainable). Must be called before the first log +// message is written. These are formatted similar to the FormatLogRecord (e.g. +// you can use %D and %T in your header/footer for date and time). +func (w *FileLogWriter) SetHeadFoot(head, foot string) *FileLogWriter { + w.header, w.trailer = head, foot + if w.maxlines_curlines == 0 { + fmt.Fprint(w.file, FormatLogRecord(w.header, &LogRecord{Created: time.Now()})) + } + return w +} + +// Set rotate at linecount (chainable). Must be called before the first log +// message is written. +func (w *FileLogWriter) SetRotateLines(maxlines int) *FileLogWriter { + //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotateLines: %v\n", maxlines) + w.maxlines = maxlines + return w +} + +// Set rotate at size (chainable). Must be called before the first log message +// is written. +func (w *FileLogWriter) SetRotateSize(maxsize int) *FileLogWriter { + //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotateSize: %v\n", maxsize) + w.maxsize = maxsize + return w +} + +// Set rotate daily (chainable). Must be called before the first log message is +// written. +func (w *FileLogWriter) SetRotateDaily(daily bool) *FileLogWriter { + //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotateDaily: %v\n", daily) + w.daily = daily + return w +} + +// Set max backup files. Must be called before the first log message +// is written. +func (w *FileLogWriter) SetRotateMaxBackup(maxbackup int) *FileLogWriter { + w.maxbackup = maxbackup + return w +} + +// SetRotate changes whether or not the old logs are kept. (chainable) Must be +// called before the first log message is written. If rotate is false, the +// files are overwritten; otherwise, they are rotated to another file before the +// new log is opened. +func (w *FileLogWriter) SetRotate(rotate bool) *FileLogWriter { + //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotate: %v\n", rotate) + w.rotate = rotate + return w +} + +// NewXMLLogWriter is a utility method for creating a FileLogWriter set up to +// output XML record log messages instead of line-based ones. 
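+//
+// Illustrative use ("trace.xml" is an example filename):
+//
+//	log := NewDefaultLogger(DEBUG)
+//	log.AddFilter("xmllog", FINE, NewXMLLogWriter("trace.xml", true))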
+func NewXMLLogWriter(fname string, rotate bool) *FileLogWriter {
+	return NewFileLogWriter(fname, rotate).SetFormat(
+		`	<record level="%L">
+		<timestamp>%D %T</timestamp>
+		<source>%S</source>
+		<message>%M</message>
+	</record>`).SetHeadFoot("<log created=\"%D %T\">", "</log>")
+}
diff --git a/vendor/github.com/alecthomas/log4go/log4go.go b/vendor/github.com/alecthomas/log4go/log4go.go
new file mode 100644
index 00000000..822e890c
--- /dev/null
+++ b/vendor/github.com/alecthomas/log4go/log4go.go
@@ -0,0 +1,484 @@
+// Copyright (C) 2010, Kyle Lemons . All rights reserved.
+
+// Package log4go provides level-based and highly configurable logging.
+//
+// Enhanced Logging
+//
+// This is inspired by the logging functionality in Java. Essentially, you create a Logger
+// object and create output filters for it. You can send whatever you want to the Logger,
+// and it will filter that based on your settings and send it to the outputs. This way, you
+// can put as much debug code in your program as you want, and when you're done you can filter
+// out the mundane messages so only the important ones show up.
+//
+// Utility functions are provided to make life easier. Here is some example code to get started:
+//
+// log := log4go.NewLogger()
+// log.AddFilter("stdout", log4go.DEBUG, log4go.NewConsoleLogWriter())
+// log.AddFilter("log", log4go.FINE, log4go.NewFileLogWriter("example.log", true))
+// log.Info("The time is now: %s", time.LocalTime().Format("15:04:05 MST 2006/01/02"))
+//
+// The first two lines can be combined with the utility NewDefaultLogger:
+//
+// log := log4go.NewDefaultLogger(log4go.DEBUG)
+// log.AddFilter("log", log4go.FINE, log4go.NewFileLogWriter("example.log", true))
+// log.Info("The time is now: %s", time.LocalTime().Format("15:04:05 MST 2006/01/02"))
+//
+// Usage notes:
+// - The ConsoleLogWriter does not display the source of the message to standard
+//   output, but the FileLogWriter does.
+// - The utility functions (Info, Debug, Warn, etc) derive their source from the
+//   calling function, and this incurs extra overhead.
+//
+// Changes from 2.0:
+// - The external interface has remained mostly stable, but a lot of the
+//   internals have been changed, so if you depended on any of this or created
+//   your own LogWriter, then you will probably have to update your code. In
+//   particular, Logger is now a map and ConsoleLogWriter is now a channel
+//   behind-the-scenes, and the LogWrite method no longer has return values.
+//
+// Future work: (please let me know if you think I should work on any of these particularly)
+// - Log file rotation
+// - Logging configuration files ala log4j
+// - Have the ability to remove filters?
+// - Have GetInfoChannel, GetDebugChannel, etc return a chan string that allows
+//   for another method of logging
+// - Add an XML filter type
+package log4go
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"runtime"
+	"strings"
+	"time"
+)
+
+// Version information
+const (
+	L4G_VERSION = "log4go-v3.0.1"
+	L4G_MAJOR   = 3
+	L4G_MINOR   = 0
+	L4G_BUILD   = 1
+)
+
+/****** Constants ******/
+
+// These are the integer logging levels used by the logger
+type Level int
+
+const (
+	FINEST Level = iota
+	FINE
+	DEBUG
+	TRACE
+	INFO
+	WARNING
+	ERROR
+	CRITICAL
+)
+
+// Logging level strings
+var (
+	levelStrings = [...]string{"FNST", "FINE", "DEBG", "TRAC", "INFO", "WARN", "EROR", "CRIT"}
+)
+
+func (l Level) String() string {
+	if l < 0 || int(l) >= len(levelStrings) {
+		return "UNKNOWN"
+	}
+	return levelStrings[int(l)]
+}
+
+/****** Variables ******/
+var (
+	// LogBufferLength specifies how many log messages a particular log4go
+	// logger can buffer at a time before writing them.
+	LogBufferLength = 32
+)
+
+/****** LogRecord ******/
+
+// A LogRecord contains all of the pertinent information for each message
+type LogRecord struct {
+	Level   Level     // The log level
+	Created time.Time // The time at which the log message was created (nanoseconds)
+	Source  string    // The message source
+	Message string    // The log message
+}
+
+/****** LogWriter ******/
+
+// This is an interface for anything that should be able to write logs
+type LogWriter interface {
+	// This will be called to log a LogRecord message.
+	LogWrite(rec *LogRecord)
+
+	// This should clean up anything lingering about the LogWriter, as it is called before
+	// the LogWriter is removed. LogWrite should not be called after Close.
+	Close()
+}
+
+/****** Logger ******/
+
+// A Filter represents the log level below which no log records are written to
+// the associated LogWriter.
+type Filter struct {
+	Level Level
+	LogWriter
+}
+
+// A Logger represents a collection of Filters through which log messages are
+// written.
+type Logger map[string]*Filter
+
+// Create a new logger.
+//
+// DEPRECATED: Use make(Logger) instead.
+func NewLogger() Logger {
+	os.Stderr.WriteString("warning: use of deprecated NewLogger\n")
+	return make(Logger)
+}
+
+// Create a new logger with a "stdout" filter configured to send log messages at
+// or above lvl to standard output.
+//
+// DEPRECATED: use NewDefaultLogger instead.
+func NewConsoleLogger(lvl Level) Logger {
+	os.Stderr.WriteString("warning: use of deprecated NewConsoleLogger\n")
+	return Logger{
+		"stdout": &Filter{lvl, NewConsoleLogWriter()},
+	}
+}
+
+// Create a new logger with a "stdout" filter configured to send log messages at
+// or above lvl to standard output.
+func NewDefaultLogger(lvl Level) Logger {
+	return Logger{
+		"stdout": &Filter{lvl, NewConsoleLogWriter()},
+	}
+}
+
+// Closes all log writers in preparation for exiting the program or a
+// reconfiguration of logging. Calling this is not strictly required, unless
+// you want to guarantee that all log messages are written. Close removes
+// all filters (and thus all LogWriters) from the logger.
+func (log Logger) Close() {
+	// Close all open loggers
+	for name, filt := range log {
+		filt.Close()
+		delete(log, name)
+	}
+}
+
+// Add a new LogWriter to the Logger which will only log messages at lvl or
+// higher. This function should not be called from multiple goroutines.
+// Returns the logger for chaining.
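+//
+// Illustrative chaining (the filter names and file name are examples):
+//
+//	log := make(Logger).
+//		AddFilter("stdout", INFO, NewConsoleLogWriter()).
+//		AddFilter("debug", FINE, NewFileLogWriter("debug.log", false))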
+func (log Logger) AddFilter(name string, lvl Level, writer LogWriter) Logger { + log[name] = &Filter{lvl, writer} + return log +} + +/******* Logging *******/ +// Send a formatted log message internally +func (log Logger) intLogf(lvl Level, format string, args ...interface{}) { + skip := true + + // Determine if any logging will be done + for _, filt := range log { + if lvl >= filt.Level { + skip = false + break + } + } + if skip { + return + } + + // Determine caller func + pc, _, lineno, ok := runtime.Caller(2) + src := "" + if ok { + src = fmt.Sprintf("%s:%d", runtime.FuncForPC(pc).Name(), lineno) + } + + msg := format + if len(args) > 0 { + msg = fmt.Sprintf(format, args...) + } + + // Make the log record + rec := &LogRecord{ + Level: lvl, + Created: time.Now(), + Source: src, + Message: msg, + } + + // Dispatch the logs + for _, filt := range log { + if lvl < filt.Level { + continue + } + filt.LogWrite(rec) + } +} + +// Send a closure log message internally +func (log Logger) intLogc(lvl Level, closure func() string) { + skip := true + + // Determine if any logging will be done + for _, filt := range log { + if lvl >= filt.Level { + skip = false + break + } + } + if skip { + return + } + + // Determine caller func + pc, _, lineno, ok := runtime.Caller(2) + src := "" + if ok { + src = fmt.Sprintf("%s:%d", runtime.FuncForPC(pc).Name(), lineno) + } + + // Make the log record + rec := &LogRecord{ + Level: lvl, + Created: time.Now(), + Source: src, + Message: closure(), + } + + // Dispatch the logs + for _, filt := range log { + if lvl < filt.Level { + continue + } + filt.LogWrite(rec) + } +} + +// Send a log message with manual level, source, and message. +func (log Logger) Log(lvl Level, source, message string) { + skip := true + + // Determine if any logging will be done + for _, filt := range log { + if lvl >= filt.Level { + skip = false + break + } + } + if skip { + return + } + + // Make the log record + rec := &LogRecord{ + Level: lvl, + Created: time.Now(), + Source: source, + Message: message, + } + + // Dispatch the logs + for _, filt := range log { + if lvl < filt.Level { + continue + } + filt.LogWrite(rec) + } +} + +// Logf logs a formatted log message at the given log level, using the caller as +// its source. +func (log Logger) Logf(lvl Level, format string, args ...interface{}) { + log.intLogf(lvl, format, args...) +} + +// Logc logs a string returned by the closure at the given log level, using the caller as +// its source. If no log message would be written, the closure is never called. +func (log Logger) Logc(lvl Level, closure func() string) { + log.intLogc(lvl, closure) +} + +// Finest logs a message at the finest log level. +// See Debug for an explanation of the arguments. +func (log Logger) Finest(arg0 interface{}, args ...interface{}) { + const ( + lvl = FINEST + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + log.intLogf(lvl, first, args...) + case func() string: + // Log the closure (no other arguments used) + log.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) + } +} + +// Fine logs a message at the fine log level. +// See Debug for an explanation of the arguments. +func (log Logger) Fine(arg0 interface{}, args ...interface{}) { + const ( + lvl = FINE + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + log.intLogf(lvl, first, args...) 
+ case func() string: + // Log the closure (no other arguments used) + log.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) + } +} + +// Debug is a utility method for debug log messages. +// The behavior of Debug depends on the first argument: +// - arg0 is a string +// When given a string as the first argument, this behaves like Logf but with +// the DEBUG log level: the first argument is interpreted as a format for the +// latter arguments. +// - arg0 is a func()string +// When given a closure of type func()string, this logs the string returned by +// the closure iff it will be logged. The closure runs at most one time. +// - arg0 is interface{} +// When given anything else, the log message will be each of the arguments +// formatted with %v and separated by spaces (ala Sprint). +func (log Logger) Debug(arg0 interface{}, args ...interface{}) { + const ( + lvl = DEBUG + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + log.intLogf(lvl, first, args...) + case func() string: + // Log the closure (no other arguments used) + log.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) + } +} + +// Trace logs a message at the trace log level. +// See Debug for an explanation of the arguments. +func (log Logger) Trace(arg0 interface{}, args ...interface{}) { + const ( + lvl = TRACE + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + log.intLogf(lvl, first, args...) + case func() string: + // Log the closure (no other arguments used) + log.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) + } +} + +// Info logs a message at the info log level. +// See Debug for an explanation of the arguments. +func (log Logger) Info(arg0 interface{}, args ...interface{}) { + const ( + lvl = INFO + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + log.intLogf(lvl, first, args...) + case func() string: + // Log the closure (no other arguments used) + log.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) + } +} + +// Warn logs a message at the warning log level and returns the formatted error. +// At the warning level and higher, there is no performance benefit if the +// message is not actually logged, because all formats are processed and all +// closures are executed to format the error message. +// See Debug for further explanation of the arguments. +func (log Logger) Warn(arg0 interface{}, args ...interface{}) error { + const ( + lvl = WARNING + ) + var msg string + switch first := arg0.(type) { + case string: + // Use the string as a format string + msg = fmt.Sprintf(first, args...) + case func() string: + // Log the closure (no other arguments used) + msg = first() + default: + // Build a format string so that it will be similar to Sprint + msg = fmt.Sprintf(fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) 
+ } + log.intLogf(lvl, msg) + return errors.New(msg) +} + +// Error logs a message at the error log level and returns the formatted error, +// See Warn for an explanation of the performance and Debug for an explanation +// of the parameters. +func (log Logger) Error(arg0 interface{}, args ...interface{}) error { + const ( + lvl = ERROR + ) + var msg string + switch first := arg0.(type) { + case string: + // Use the string as a format string + msg = fmt.Sprintf(first, args...) + case func() string: + // Log the closure (no other arguments used) + msg = first() + default: + // Build a format string so that it will be similar to Sprint + msg = fmt.Sprintf(fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) + } + log.intLogf(lvl, msg) + return errors.New(msg) +} + +// Critical logs a message at the critical log level and returns the formatted error, +// See Warn for an explanation of the performance and Debug for an explanation +// of the parameters. +func (log Logger) Critical(arg0 interface{}, args ...interface{}) error { + const ( + lvl = CRITICAL + ) + var msg string + switch first := arg0.(type) { + case string: + // Use the string as a format string + msg = fmt.Sprintf(first, args...) + case func() string: + // Log the closure (no other arguments used) + msg = first() + default: + // Build a format string so that it will be similar to Sprint + msg = fmt.Sprintf(fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) + } + log.intLogf(lvl, msg) + return errors.New(msg) +} diff --git a/vendor/github.com/alecthomas/log4go/pattlog.go b/vendor/github.com/alecthomas/log4go/pattlog.go new file mode 100644 index 00000000..82b4e36b --- /dev/null +++ b/vendor/github.com/alecthomas/log4go/pattlog.go @@ -0,0 +1,126 @@ +// Copyright (C) 2010, Kyle Lemons . All rights reserved. 
+
+package log4go
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+)
+
+const (
+	FORMAT_DEFAULT = "[%D %T] [%L] (%S) %M"
+	FORMAT_SHORT   = "[%t %d] [%L] %M"
+	FORMAT_ABBREV  = "[%L] %M"
+)
+
+type formatCacheType struct {
+	LastUpdateSeconds    int64
+	shortTime, shortDate string
+	longTime, longDate   string
+}
+
+var formatCache = &formatCacheType{}
+
+// Known format codes:
+// %T - Time (15:04:05 MST)
+// %t - Time (15:04)
+// %D - Date (2006/01/02)
+// %d - Date (02/01/06)
+// %L - Level (FNST, FINE, DEBG, TRAC, INFO, WARN, EROR, CRIT)
+// %S - Source (full)
+// %s - Source (final path element only)
+// %M - Message
+// Ignores unknown formats
+// Recommended: "[%D %T] [%L] (%S) %M"
+func FormatLogRecord(format string, rec *LogRecord) string {
+	if rec == nil {
+		return ""
+	}
+	if len(format) == 0 {
+		return ""
+	}
+
+	out := bytes.NewBuffer(make([]byte, 0, 64))
+	secs := rec.Created.UnixNano() / 1e9
+
+	cache := *formatCache
+	if cache.LastUpdateSeconds != secs {
+		month, day, year := rec.Created.Month(), rec.Created.Day(), rec.Created.Year()
+		hour, minute, second := rec.Created.Hour(), rec.Created.Minute(), rec.Created.Second()
+		zone, _ := rec.Created.Zone()
+		updated := &formatCacheType{
+			LastUpdateSeconds: secs,
+			shortTime:         fmt.Sprintf("%02d:%02d", hour, minute),
+			shortDate:         fmt.Sprintf("%02d/%02d/%02d", day, month, year%100),
+			longTime:          fmt.Sprintf("%02d:%02d:%02d %s", hour, minute, second, zone),
+			longDate:          fmt.Sprintf("%04d/%02d/%02d", year, month, day),
+		}
+		cache = *updated
+		formatCache = updated
+	}
+
+	// Split the string into pieces by % signs
+	pieces := bytes.Split([]byte(format), []byte{'%'})
+
+	// Iterate over the pieces, replacing known formats
+	for i, piece := range pieces {
+		if i > 0 && len(piece) > 0 {
+			switch piece[0] {
+			case 'T':
+				out.WriteString(cache.longTime)
+			case 't':
+				out.WriteString(cache.shortTime)
+			case 'D':
+				out.WriteString(cache.longDate)
+			case 'd':
+				out.WriteString(cache.shortDate)
+			case 'L':
+				out.WriteString(levelStrings[rec.Level])
+			case 'S':
+				out.WriteString(rec.Source)
+			case 's':
+				slice := strings.Split(rec.Source, "/")
+				out.WriteString(slice[len(slice)-1])
+			case 'M':
+				out.WriteString(rec.Message)
+			}
+			if len(piece) > 1 {
+				out.Write(piece[1:])
+			}
+		} else if len(piece) > 0 {
+			out.Write(piece)
+		}
+	}
+	out.WriteByte('\n')
+
+	return out.String()
+}
+
+// A FormatLogWriter formats each record with the given format string and
+// writes the result to the supplied io.Writer.
+type FormatLogWriter chan *LogRecord
+
+// This creates a new FormatLogWriter
+func NewFormatLogWriter(out io.Writer, format string) FormatLogWriter {
+	records := make(FormatLogWriter, LogBufferLength)
+	go records.run(out, format)
+	return records
+}
+
+func (w FormatLogWriter) run(out io.Writer, format string) {
+	for rec := range w {
+		fmt.Fprint(out, FormatLogRecord(format, rec))
+	}
+}
+
+// This is the FormatLogWriter's output method. This will block if the output
+// buffer is full.
+func (w FormatLogWriter) LogWrite(rec *LogRecord) {
+	w <- rec
+}
+
+// Close stops the writer from accepting new records. Attempts to send log
+// messages to this writer after a Close have undefined behavior.
+func (w FormatLogWriter) Close() {
+	close(w)
+}
diff --git a/vendor/github.com/alecthomas/log4go/socklog.go b/vendor/github.com/alecthomas/log4go/socklog.go
new file mode 100644
index 00000000..1d224a99
--- /dev/null
+++ b/vendor/github.com/alecthomas/log4go/socklog.go
@@ -0,0 +1,57 @@
+// Copyright (C) 2010, Kyle Lemons . All rights reserved.
+
+package log4go
+
+import (
+	"encoding/json"
+	"fmt"
+	"net"
+	"os"
+)
+
+// This log writer sends output to a socket
+type SocketLogWriter chan *LogRecord
+
+// This is the SocketLogWriter's output method
+func (w SocketLogWriter) LogWrite(rec *LogRecord) {
+	w <- rec
+}
+
+func (w SocketLogWriter) Close() {
+	close(w)
+}
+
+func NewSocketLogWriter(proto, hostport string) SocketLogWriter {
+	sock, err := net.Dial(proto, hostport)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "NewSocketLogWriter(%q): %s\n", hostport, err)
+		return nil
+	}
+
+	w := SocketLogWriter(make(chan *LogRecord, LogBufferLength))
+
+	go func() {
+		defer func() {
+			if sock != nil && proto == "tcp" {
+				sock.Close()
+			}
+		}()
+
+		for rec := range w {
+			// Marshal into JSON
+			js, err := json.Marshal(rec)
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "SocketLogWriter(%q): %s\n", hostport, err)
+				return
+			}
+
+			_, err = sock.Write(js)
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "SocketLogWriter(%q): %s\n", hostport, err)
+				return
+			}
+		}
+	}()
+
+	return w
+}
diff --git a/vendor/github.com/alecthomas/log4go/termlog.go b/vendor/github.com/alecthomas/log4go/termlog.go
new file mode 100644
index 00000000..8a941e26
--- /dev/null
+++ b/vendor/github.com/alecthomas/log4go/termlog.go
@@ -0,0 +1,49 @@
+// Copyright (C) 2010, Kyle Lemons . All rights reserved.
+
+package log4go
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"time"
+)
+
+var stdout io.Writer = os.Stdout
+
+// This is the standard writer that prints to standard output.
+type ConsoleLogWriter struct {
+	format string
+	w      chan *LogRecord
+}
+
+// This creates a new ConsoleLogWriter
+func NewConsoleLogWriter() *ConsoleLogWriter {
+	consoleWriter := &ConsoleLogWriter{
+		format: "[%T %D] [%L] (%S) %M",
+		w:      make(chan *LogRecord, LogBufferLength),
+	}
+	go consoleWriter.run(stdout)
+	return consoleWriter
+}
+
+func (c *ConsoleLogWriter) SetFormat(format string) {
+	c.format = format
+}
+
+func (c *ConsoleLogWriter) run(out io.Writer) {
+	for rec := range c.w {
+		fmt.Fprint(out, FormatLogRecord(c.format, rec))
+	}
+}
+
+// This is the ConsoleLogWriter's output method. This will block if the output
+// buffer is full.
+func (c *ConsoleLogWriter) LogWrite(rec *LogRecord) {
+	c.w <- rec
+}
+
+// Close stops the logger from sending messages to standard output. Attempts to
+// send log messages to this logger after a Close have undefined behavior.
+func (c *ConsoleLogWriter) Close() {
+	close(c.w)
+	time.Sleep(50 * time.Millisecond) // Try to give console I/O time to complete
+}
diff --git a/vendor/github.com/alecthomas/log4go/wrapper.go b/vendor/github.com/alecthomas/log4go/wrapper.go
new file mode 100644
index 00000000..2ae222b0
--- /dev/null
+++ b/vendor/github.com/alecthomas/log4go/wrapper.go
@@ -0,0 +1,278 @@
+// Copyright (C) 2010, Kyle Lemons . All rights reserved.
+
+package log4go
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"strings"
+)
+
+var (
+	Global Logger
+)
+
+func init() {
+	Global = NewDefaultLogger(DEBUG)
+}
+
+// Wrapper for (*Logger).LoadConfiguration
+func LoadConfiguration(filename string) {
+	Global.LoadConfiguration(filename)
+}
+
+// Wrapper for (*Logger).AddFilter
+func AddFilter(name string, lvl Level, writer LogWriter) {
+	Global.AddFilter(name, lvl, writer)
+}
+
+// Wrapper for (*Logger).Close (closes and removes all logwriters)
+func Close() {
+	Global.Close()
+}
+
+func Crash(args ...interface{}) {
+	if len(args) > 0 {
+		Global.intLogf(CRITICAL, strings.Repeat(" %v", len(args))[1:], args...)
+ } + panic(args) +} + +// Logs the given message and crashes the program +func Crashf(format string, args ...interface{}) { + Global.intLogf(CRITICAL, format, args...) + Global.Close() // so that hopefully the messages get logged + panic(fmt.Sprintf(format, args...)) +} + +// Compatibility with `log` +func Exit(args ...interface{}) { + if len(args) > 0 { + Global.intLogf(ERROR, strings.Repeat(" %v", len(args))[1:], args...) + } + Global.Close() // so that hopefully the messages get logged + os.Exit(0) +} + +// Compatibility with `log` +func Exitf(format string, args ...interface{}) { + Global.intLogf(ERROR, format, args...) + Global.Close() // so that hopefully the messages get logged + os.Exit(0) +} + +// Compatibility with `log` +func Stderr(args ...interface{}) { + if len(args) > 0 { + Global.intLogf(ERROR, strings.Repeat(" %v", len(args))[1:], args...) + } +} + +// Compatibility with `log` +func Stderrf(format string, args ...interface{}) { + Global.intLogf(ERROR, format, args...) +} + +// Compatibility with `log` +func Stdout(args ...interface{}) { + if len(args) > 0 { + Global.intLogf(INFO, strings.Repeat(" %v", len(args))[1:], args...) + } +} + +// Compatibility with `log` +func Stdoutf(format string, args ...interface{}) { + Global.intLogf(INFO, format, args...) +} + +// Send a log message manually +// Wrapper for (*Logger).Log +func Log(lvl Level, source, message string) { + Global.Log(lvl, source, message) +} + +// Send a formatted log message easily +// Wrapper for (*Logger).Logf +func Logf(lvl Level, format string, args ...interface{}) { + Global.intLogf(lvl, format, args...) +} + +// Send a closure log message +// Wrapper for (*Logger).Logc +func Logc(lvl Level, closure func() string) { + Global.intLogc(lvl, closure) +} + +// Utility for finest log messages (see Debug() for parameter explanation) +// Wrapper for (*Logger).Finest +func Finest(arg0 interface{}, args ...interface{}) { + const ( + lvl = FINEST + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + Global.intLogf(lvl, first, args...) + case func() string: + // Log the closure (no other arguments used) + Global.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) + } +} + +// Utility for fine log messages (see Debug() for parameter explanation) +// Wrapper for (*Logger).Fine +func Fine(arg0 interface{}, args ...interface{}) { + const ( + lvl = FINE + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + Global.intLogf(lvl, first, args...) + case func() string: + // Log the closure (no other arguments used) + Global.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) + } +} + +// Utility for debug log messages +// When given a string as the first argument, this behaves like Logf but with the DEBUG log level (e.g. the first argument is interpreted as a format for the latter arguments) +// When given a closure of type func()string, this logs the string returned by the closure iff it will be logged. The closure runs at most one time. +// When given anything else, the log message will be each of the arguments formatted with %v and separated by spaces (ala Sprint). 
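+//
+// Illustrative calls (n, expensiveDump, req and resp are placeholders):
+//
+//	Debug("found %d entries", n)                    // format string
+//	Debug(func() string { return expensiveDump() }) // closure, run only if logged
+//	Debug(req, resp)                                // Sprint-style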
+// Wrapper for (*Logger).Debug +func Debug(arg0 interface{}, args ...interface{}) { + const ( + lvl = DEBUG + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + Global.intLogf(lvl, first, args...) + case func() string: + // Log the closure (no other arguments used) + Global.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) + } +} + +// Utility for trace log messages (see Debug() for parameter explanation) +// Wrapper for (*Logger).Trace +func Trace(arg0 interface{}, args ...interface{}) { + const ( + lvl = TRACE + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + Global.intLogf(lvl, first, args...) + case func() string: + // Log the closure (no other arguments used) + Global.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) + } +} + +// Utility for info log messages (see Debug() for parameter explanation) +// Wrapper for (*Logger).Info +func Info(arg0 interface{}, args ...interface{}) { + const ( + lvl = INFO + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + Global.intLogf(lvl, first, args...) + case func() string: + // Log the closure (no other arguments used) + Global.intLogc(lvl, first) + default: + // Build a format string so that it will be similar to Sprint + Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) + } +} + +// Utility for warn log messages (returns an error for easy function returns) (see Debug() for parameter explanation) +// These functions will execute a closure exactly once, to build the error message for the return +// Wrapper for (*Logger).Warn +func Warn(arg0 interface{}, args ...interface{}) error { + const ( + lvl = WARNING + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + Global.intLogf(lvl, first, args...) + return errors.New(fmt.Sprintf(first, args...)) + case func() string: + // Log the closure (no other arguments used) + str := first() + Global.intLogf(lvl, "%s", str) + return errors.New(str) + default: + // Build a format string so that it will be similar to Sprint + Global.intLogf(lvl, fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) + return errors.New(fmt.Sprint(first) + fmt.Sprintf(strings.Repeat(" %v", len(args)), args...)) + } + return nil +} + +// Utility for error log messages (returns an error for easy function returns) (see Debug() for parameter explanation) +// These functions will execute a closure exactly once, to build the error message for the return +// Wrapper for (*Logger).Error +func Error(arg0 interface{}, args ...interface{}) error { + const ( + lvl = ERROR + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + Global.intLogf(lvl, first, args...) + return errors.New(fmt.Sprintf(first, args...)) + case func() string: + // Log the closure (no other arguments used) + str := first() + Global.intLogf(lvl, "%s", str) + return errors.New(str) + default: + // Build a format string so that it will be similar to Sprint + Global.intLogf(lvl, fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) 
+ return errors.New(fmt.Sprint(first) + fmt.Sprintf(strings.Repeat(" %v", len(args)), args...)) + } + return nil +} + +// Utility for critical log messages (returns an error for easy function returns) (see Debug() for parameter explanation) +// These functions will execute a closure exactly once, to build the error message for the return +// Wrapper for (*Logger).Critical +func Critical(arg0 interface{}, args ...interface{}) error { + const ( + lvl = CRITICAL + ) + switch first := arg0.(type) { + case string: + // Use the string as a format string + Global.intLogf(lvl, first, args...) + return errors.New(fmt.Sprintf(first, args...)) + case func() string: + // Log the closure (no other arguments used) + str := first() + Global.intLogf(lvl, "%s", str) + return errors.New(str) + default: + // Build a format string so that it will be similar to Sprint + Global.intLogf(lvl, fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) + return errors.New(fmt.Sprint(first) + fmt.Sprintf(strings.Repeat(" %v", len(args)), args...)) + } + return nil +} diff --git a/vendor/github.com/gorilla/schema/LICENSE b/vendor/github.com/gorilla/schema/LICENSE new file mode 100644 index 00000000..0e5fb872 --- /dev/null +++ b/vendor/github.com/gorilla/schema/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/schema/cache.go b/vendor/github.com/gorilla/schema/cache.go new file mode 100644 index 00000000..1613b1e6 --- /dev/null +++ b/vendor/github.com/gorilla/schema/cache.go @@ -0,0 +1,245 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schema + +import ( + "errors" + "reflect" + "strconv" + "strings" + "sync" +) + +var invalidPath = errors.New("schema: invalid path") + +// newCache returns a new cache. 
+func newCache() *cache { + c := cache{ + m: make(map[reflect.Type]*structInfo), + conv: make(map[reflect.Kind]Converter), + regconv: make(map[reflect.Type]Converter), + tag: "schema", + } + for k, v := range converters { + c.conv[k] = v + } + return &c +} + +// cache caches meta-data about a struct. +type cache struct { + l sync.RWMutex + m map[reflect.Type]*structInfo + conv map[reflect.Kind]Converter + regconv map[reflect.Type]Converter + tag string +} + +// parsePath parses a path in dotted notation verifying that it is a valid +// path to a struct field. +// +// It returns "path parts" which contain indices to fields to be used by +// reflect.Value.FieldByString(). Multiple parts are required for slices of +// structs. +func (c *cache) parsePath(p string, t reflect.Type) ([]pathPart, error) { + var struc *structInfo + var field *fieldInfo + var index64 int64 + var err error + parts := make([]pathPart, 0) + path := make([]string, 0) + keys := strings.Split(p, ".") + for i := 0; i < len(keys); i++ { + if t.Kind() != reflect.Struct { + return nil, invalidPath + } + if struc = c.get(t); struc == nil { + return nil, invalidPath + } + if field = struc.get(keys[i]); field == nil { + return nil, invalidPath + } + // Valid field. Append index. + path = append(path, field.name) + if field.ss { + // Parse a special case: slices of structs. + // i+1 must be the slice index. + // + // Now that struct can implements TextUnmarshaler interface, + // we don't need to force the struct's fields to appear in the path. + // So checking i+2 is not necessary anymore. + i++ + if i+1 > len(keys) { + return nil, invalidPath + } + if index64, err = strconv.ParseInt(keys[i], 10, 0); err != nil { + return nil, invalidPath + } + parts = append(parts, pathPart{ + path: path, + field: field, + index: int(index64), + }) + path = make([]string, 0) + + // Get the next struct type, dropping ptrs. + if field.typ.Kind() == reflect.Ptr { + t = field.typ.Elem() + } else { + t = field.typ + } + if t.Kind() == reflect.Slice { + t = t.Elem() + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + } + } else if field.typ.Kind() == reflect.Ptr { + t = field.typ.Elem() + } else { + t = field.typ + } + } + // Add the remaining. + parts = append(parts, pathPart{ + path: path, + field: field, + index: -1, + }) + return parts, nil +} + +// get returns a cached structInfo, creating it if necessary. +func (c *cache) get(t reflect.Type) *structInfo { + c.l.RLock() + info := c.m[t] + c.l.RUnlock() + if info == nil { + info = c.create(t, nil) + c.l.Lock() + c.m[t] = info + c.l.Unlock() + } + return info +} + +// create creates a structInfo with meta-data about a struct. +func (c *cache) create(t reflect.Type, info *structInfo) *structInfo { + if info == nil { + info = &structInfo{fields: []*fieldInfo{}} + } + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.Anonymous { + ft := field.Type + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if ft.Kind() == reflect.Struct { + c.create(ft, info) + } + } + c.createField(field, info) + } + return info +} + +// createField creates a fieldInfo for the given field. +func (c *cache) createField(field reflect.StructField, info *structInfo) { + alias := fieldAlias(field, c.tag) + if alias == "-" { + // Ignore this field. + return + } + // Check if the type is supported and don't cache it if not. + // First let's get the basic type. 
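+	// For example, all of these fields share the basic kind string and are
+	// therefore supported:
+	//	Name  string
+	//	Name  *string
+	//	Names []string
+	//	Names []*string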
+ isSlice, isStruct := false, false + ft := field.Type + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if isSlice = ft.Kind() == reflect.Slice; isSlice { + ft = ft.Elem() + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + } + if ft.Kind() == reflect.Array { + ft = ft.Elem() + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + } + if isStruct = ft.Kind() == reflect.Struct; !isStruct { + if conv := c.conv[ft.Kind()]; conv == nil { + // Type is not supported. + return + } + } + + info.fields = append(info.fields, &fieldInfo{ + typ: field.Type, + name: field.Name, + ss: isSlice && isStruct, + alias: alias, + }) +} + +// converter returns the converter for a type. +func (c *cache) converter(t reflect.Type) Converter { + conv := c.regconv[t] + if conv == nil { + conv = c.conv[t.Kind()] + } + return conv +} + +// ---------------------------------------------------------------------------- + +type structInfo struct { + fields []*fieldInfo +} + +func (i *structInfo) get(alias string) *fieldInfo { + for _, field := range i.fields { + if strings.EqualFold(field.alias, alias) { + return field + } + } + return nil +} + +type fieldInfo struct { + typ reflect.Type + name string // field name in the struct. + ss bool // true if this is a slice of structs. + alias string +} + +type pathPart struct { + field *fieldInfo + path []string // path to the field: walks structs using field names. + index int // struct index in slices of structs. +} + +// ---------------------------------------------------------------------------- + +// fieldAlias parses a field tag to get a field alias. +func fieldAlias(field reflect.StructField, tagName string) string { + var alias string + if tag := field.Tag.Get(tagName); tag != "" { + // For now tags only support the name but let's follow the + // comma convention from encoding/json and others. + if idx := strings.Index(tag, ","); idx == -1 { + alias = tag + } else { + alias = tag[:idx] + } + } + if alias == "" { + alias = field.Name + } + return alias +} diff --git a/vendor/github.com/gorilla/schema/converter.go b/vendor/github.com/gorilla/schema/converter.go new file mode 100644 index 00000000..b33e9423 --- /dev/null +++ b/vendor/github.com/gorilla/schema/converter.go @@ -0,0 +1,145 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schema + +import ( + "reflect" + "strconv" +) + +type Converter func(string) reflect.Value + +var ( + invalidValue = reflect.Value{} + boolType = reflect.Bool + float32Type = reflect.Float32 + float64Type = reflect.Float64 + intType = reflect.Int + int8Type = reflect.Int8 + int16Type = reflect.Int16 + int32Type = reflect.Int32 + int64Type = reflect.Int64 + stringType = reflect.String + uintType = reflect.Uint + uint8Type = reflect.Uint8 + uint16Type = reflect.Uint16 + uint32Type = reflect.Uint32 + uint64Type = reflect.Uint64 +) + +// Default converters for basic types. 
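+// Each converter returns the zero reflect.Value when parsing fails, which the
+// decoder reports as a ConversionError. Custom types can follow the same
+// shape; a hypothetical sketch for a time.Duration field:
+//
+//	decoder.RegisterConverter(time.Duration(0), func(s string) reflect.Value {
+//		if d, err := time.ParseDuration(s); err == nil {
+//			return reflect.ValueOf(d)
+//		}
+//		return reflect.Value{} // invalid value => conversion error
+//	})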
+var converters = map[reflect.Kind]Converter{ + boolType: convertBool, + float32Type: convertFloat32, + float64Type: convertFloat64, + intType: convertInt, + int8Type: convertInt8, + int16Type: convertInt16, + int32Type: convertInt32, + int64Type: convertInt64, + stringType: convertString, + uintType: convertUint, + uint8Type: convertUint8, + uint16Type: convertUint16, + uint32Type: convertUint32, + uint64Type: convertUint64, +} + +func convertBool(value string) reflect.Value { + if value == "on" { + return reflect.ValueOf(true) + } else if v, err := strconv.ParseBool(value); err == nil { + return reflect.ValueOf(v) + } + return invalidValue +} + +func convertFloat32(value string) reflect.Value { + if v, err := strconv.ParseFloat(value, 32); err == nil { + return reflect.ValueOf(float32(v)) + } + return invalidValue +} + +func convertFloat64(value string) reflect.Value { + if v, err := strconv.ParseFloat(value, 64); err == nil { + return reflect.ValueOf(v) + } + return invalidValue +} + +func convertInt(value string) reflect.Value { + if v, err := strconv.ParseInt(value, 10, 0); err == nil { + return reflect.ValueOf(int(v)) + } + return invalidValue +} + +func convertInt8(value string) reflect.Value { + if v, err := strconv.ParseInt(value, 10, 8); err == nil { + return reflect.ValueOf(int8(v)) + } + return invalidValue +} + +func convertInt16(value string) reflect.Value { + if v, err := strconv.ParseInt(value, 10, 16); err == nil { + return reflect.ValueOf(int16(v)) + } + return invalidValue +} + +func convertInt32(value string) reflect.Value { + if v, err := strconv.ParseInt(value, 10, 32); err == nil { + return reflect.ValueOf(int32(v)) + } + return invalidValue +} + +func convertInt64(value string) reflect.Value { + if v, err := strconv.ParseInt(value, 10, 64); err == nil { + return reflect.ValueOf(v) + } + return invalidValue +} + +func convertString(value string) reflect.Value { + return reflect.ValueOf(value) +} + +func convertUint(value string) reflect.Value { + if v, err := strconv.ParseUint(value, 10, 0); err == nil { + return reflect.ValueOf(uint(v)) + } + return invalidValue +} + +func convertUint8(value string) reflect.Value { + if v, err := strconv.ParseUint(value, 10, 8); err == nil { + return reflect.ValueOf(uint8(v)) + } + return invalidValue +} + +func convertUint16(value string) reflect.Value { + if v, err := strconv.ParseUint(value, 10, 16); err == nil { + return reflect.ValueOf(uint16(v)) + } + return invalidValue +} + +func convertUint32(value string) reflect.Value { + if v, err := strconv.ParseUint(value, 10, 32); err == nil { + return reflect.ValueOf(uint32(v)) + } + return invalidValue +} + +func convertUint64(value string) reflect.Value { + if v, err := strconv.ParseUint(value, 10, 64); err == nil { + return reflect.ValueOf(v) + } + return invalidValue +} diff --git a/vendor/github.com/gorilla/schema/decoder.go b/vendor/github.com/gorilla/schema/decoder.go new file mode 100644 index 00000000..53b0337f --- /dev/null +++ b/vendor/github.com/gorilla/schema/decoder.go @@ -0,0 +1,299 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package schema + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strings" +) + +// NewDecoder returns a new Decoder. +func NewDecoder() *Decoder { + return &Decoder{cache: newCache()} +} + +// Decoder decodes values from a map[string][]string to a struct. 
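+//
+// A minimal sketch of typical use (the handler and Person type are
+// illustrative):
+//
+//	var decoder = NewDecoder() // safe to share: it caches struct metadata
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		r.ParseForm()
+//		var p Person
+//		if err := decoder.Decode(&p, r.PostForm); err != nil {
+//			// handle error
+//		}
+//	}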
+type Decoder struct { + cache *cache + zeroEmpty bool + ignoreUnknownKeys bool +} + +// SetAliasTag changes the tag used to locate custom field aliases. +// The default tag is "schema". +func (d *Decoder) SetAliasTag(tag string) { + d.cache.tag = tag +} + +// ZeroEmpty controls the behaviour when the decoder encounters empty values +// in a map. +// If z is true and a key in the map has the empty string as a value +// then the corresponding struct field is set to the zero value. +// If z is false then empty strings are ignored. +// +// The default value is false, that is empty values do not change +// the value of the struct field. +func (d *Decoder) ZeroEmpty(z bool) { + d.zeroEmpty = z +} + +// IgnoreUnknownKeys controls the behaviour when the decoder encounters unknown +// keys in the map. +// If i is true and an unknown field is encountered, it is ignored. This is +// similar to how unknown keys are handled by encoding/json. +// If i is false then Decode will return an error. Note that any valid keys +// will still be decoded in to the target struct. +// +// To preserve backwards compatibility, the default value is false. +func (d *Decoder) IgnoreUnknownKeys(i bool) { + d.ignoreUnknownKeys = i +} + +// RegisterConverter registers a converter function for a custom type. +func (d *Decoder) RegisterConverter(value interface{}, converterFunc Converter) { + d.cache.regconv[reflect.TypeOf(value)] = converterFunc +} + +// Decode decodes a map[string][]string to a struct. +// +// The first parameter must be a pointer to a struct. +// +// The second parameter is a map, typically url.Values from an HTTP request. +// Keys are "paths" in dotted notation to the struct fields and nested structs. +// +// See the package documentation for a full explanation of the mechanics. +func (d *Decoder) Decode(dst interface{}, src map[string][]string) error { + v := reflect.ValueOf(dst) + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return errors.New("schema: interface must be a pointer to struct") + } + v = v.Elem() + t := v.Type() + errors := MultiError{} + for path, values := range src { + if parts, err := d.cache.parsePath(path, t); err == nil { + if err = d.decode(v, path, parts, values); err != nil { + errors[path] = err + } + } else if !d.ignoreUnknownKeys { + errors[path] = fmt.Errorf("schema: invalid path %q", path) + } + } + if len(errors) > 0 { + return errors + } + return nil +} + +// decode fills a struct field using a parsed path. +func (d *Decoder) decode(v reflect.Value, path string, parts []pathPart, values []string) error { + // Get the field walking the struct fields by index. + for _, name := range parts[0].path { + if v.Type().Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = v.FieldByName(name) + } + + // Don't even bother for unexported fields. + if !v.CanSet() { + return nil + } + + // Dereference if needed. + t := v.Type() + if t.Kind() == reflect.Ptr { + t = t.Elem() + if v.IsNil() { + v.Set(reflect.New(t)) + } + v = v.Elem() + } + + // Slice of structs. Let's go recursive. + if len(parts) > 1 { + idx := parts[0].index + if v.IsNil() || v.Len() < idx+1 { + value := reflect.MakeSlice(t, idx+1, idx+1) + if v.Len() < idx+1 { + // Resize it. + reflect.Copy(value, v) + } + v.Set(value) + } + return d.decode(v.Index(idx), path, parts[1:], values) + } + + // Get the converter early in case there is one for a slice type. 
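+	// A converter registered for the slice type itself (via RegisterConverter)
+	// takes precedence and receives the raw string; otherwise slices are
+	// decoded element by element below.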
+ conv := d.cache.converter(t) + if conv == nil && t.Kind() == reflect.Slice { + var items []reflect.Value + elemT := t.Elem() + isPtrElem := elemT.Kind() == reflect.Ptr + if isPtrElem { + elemT = elemT.Elem() + } + + // Try to get a converter for the element type. + conv := d.cache.converter(elemT) + if conv == nil { + // As we are not dealing with slice of structs here, we don't need to check if the type + // implements TextUnmarshaler interface + return fmt.Errorf("schema: converter not found for %v", elemT) + } + + for key, value := range values { + if value == "" { + if d.zeroEmpty { + items = append(items, reflect.Zero(elemT)) + } + } else if item := conv(value); item.IsValid() { + if isPtrElem { + ptr := reflect.New(elemT) + ptr.Elem().Set(item) + item = ptr + } + if item.Type() != elemT && !isPtrElem { + item = item.Convert(elemT) + } + items = append(items, item) + } else { + if strings.Contains(value, ",") { + values := strings.Split(value, ",") + for _, value := range values { + if value == "" { + if d.zeroEmpty { + items = append(items, reflect.Zero(elemT)) + } + } else if item := conv(value); item.IsValid() { + if isPtrElem { + ptr := reflect.New(elemT) + ptr.Elem().Set(item) + item = ptr + } + if item.Type() != elemT && !isPtrElem { + item = item.Convert(elemT) + } + items = append(items, item) + } else { + return ConversionError{ + Key: path, + Type: elemT, + Index: key, + } + } + } + } else { + return ConversionError{ + Key: path, + Type: elemT, + Index: key, + } + } + } + } + value := reflect.Append(reflect.MakeSlice(t, 0, 0), items...) + v.Set(value) + } else { + val := "" + // Use the last value provided if any values were provided + if len(values) > 0 { + val = values[len(values)-1] + } + + if val == "" { + if d.zeroEmpty { + v.Set(reflect.Zero(t)) + } + } else if conv != nil { + if value := conv(val); value.IsValid() { + v.Set(value.Convert(t)) + } else { + return ConversionError{ + Key: path, + Type: t, + Index: -1, + } + } + } else { + // When there's no registered conversion for the custom type, we will check if the type + // implements the TextUnmarshaler interface. As the UnmarshalText function should be applied + // to the pointer of the type, we convert the value to pointer. + if v.CanAddr() { + v = v.Addr() + } + + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + if err := u.UnmarshalText([]byte(val)); err != nil { + return ConversionError{ + Key: path, + Type: t, + Index: -1, + Err: err, + } + } + + } else { + return fmt.Errorf("schema: converter not found for %v", t) + } + } + } + return nil +} + +// Errors --------------------------------------------------------------------- + +// ConversionError stores information about a failed conversion. +type ConversionError struct { + Key string // key from the source map. + Type reflect.Type // expected type of elem + Index int // index for multi-value fields; -1 for single-value fields. + Err error // low-level error (when it exists) +} + +func (e ConversionError) Error() string { + var output string + + if e.Index < 0 { + output = fmt.Sprintf("schema: error converting value for %q", e.Key) + } else { + output = fmt.Sprintf("schema: error converting value for index %d of %q", + e.Index, e.Key) + } + + if e.Err != nil { + output = fmt.Sprintf("%s. Details: %s", output, e.Err) + } + + return output +} + +// MultiError stores multiple decoding errors. +// +// Borrowed from the App Engine SDK. 
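+//
+// Callers can type-assert the error returned by Decode to inspect per-field
+// failures, e.g.:
+//
+//	if err := decoder.Decode(dst, src); err != nil {
+//		if errs, ok := err.(MultiError); ok {
+//			for field, ferr := range errs {
+//				log.Printf("%s: %v", field, ferr)
+//			}
+//		}
+//	}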
+type MultiError map[string]error
+
+func (e MultiError) Error() string {
+	s := ""
+	for _, err := range e {
+		s = err.Error()
+		break
+	}
+	switch len(e) {
+	case 0:
+		return "(0 errors)"
+	case 1:
+		return s
+	case 2:
+		return s + " (and 1 other error)"
+	}
+	return fmt.Sprintf("%s (and %d other errors)", s, len(e)-1)
+}
diff --git a/vendor/github.com/gorilla/schema/doc.go b/vendor/github.com/gorilla/schema/doc.go
new file mode 100644
index 00000000..22c0ff4b
--- /dev/null
+++ b/vendor/github.com/gorilla/schema/doc.go
@@ -0,0 +1,148 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package gorilla/schema fills a struct with form values.
+
+The basic usage is really simple. Given this struct:
+
+	type Person struct {
+		Name  string
+		Phone string
+	}
+
+...we can fill it passing a map to the Decode() method:
+
+	values := map[string][]string{
+		"Name":  {"John"},
+		"Phone": {"999-999-999"},
+	}
+	person := new(Person)
+	decoder := schema.NewDecoder()
+	decoder.Decode(person, values)
+
+This is just a simple example and it doesn't make a lot of sense to create
+the map manually. Typically it will come from an http.Request object and
+will be of type url.Values: http.Request.Form or http.Request.MultipartForm:
+
+	func MyHandler(w http.ResponseWriter, r *http.Request) {
+		err := r.ParseForm()
+
+		if err != nil {
+			// Handle error
+		}
+
+		decoder := schema.NewDecoder()
+		// r.PostForm is a map of our POST form values
+		err = decoder.Decode(person, r.PostForm)
+
+		if err != nil {
+			// Handle error
+		}
+
+		// Do something with person.Name or person.Phone
+	}
+
+Note: it is a good idea to set a Decoder instance as a package global,
+because it caches meta-data about structs, and an instance can be shared safely:
+
+	var decoder = schema.NewDecoder()
+
+To define custom names for fields, use a struct tag "schema". To not populate
+certain fields, use a dash for the name and it will be ignored:
+
+	type Person struct {
+		Name  string `schema:"name"`  // custom name
+		Phone string `schema:"phone"` // custom name
+		Admin bool   `schema:"-"`     // this field is never set
+	}
+
+The supported field types in the destination struct are:
+
+	* bool
+	* float variants (float32, float64)
+	* int variants (int, int8, int16, int32, int64)
+	* string
+	* uint variants (uint, uint8, uint16, uint32, uint64)
+	* struct
+	* a pointer to one of the above types
+	* a slice or a pointer to a slice of one of the above types
+
+Non-supported types are simply ignored, however custom types can be registered
+to be converted.
+
+To fill nested structs, keys must use a dotted notation as the "path" for the
+field. So for example, to fill the struct Person below:
+
+	type Phone struct {
+		Label  string
+		Number string
+	}
+
+	type Person struct {
+		Name  string
+		Phone Phone
+	}
+
+...the source map must have the keys "Name", "Phone.Label" and "Phone.Number".
+This means that an HTML form to fill a Person struct must look like this:
+
+	<form>
+		<input type="text" name="Name">
+		<input type="text" name="Phone.Label">
+		<input type="text" name="Phone.Number">
+	</form>
+
+Single values are filled using the first value for a key from the source map.
+Slices are filled using all values for a key from the source map. So to fill
+a Person with multiple Phone values, like:
+
+	type Person struct {
+		Name   string
+		Phones []Phone
+	}
+
+...an HTML form that accepts three Phone values would look like this:
+
+	<form>
+		<input type="text" name="Phones.0.Label">
+		<input type="text" name="Phones.0.Number">
+		<input type="text" name="Phones.1.Label">
+		<input type="text" name="Phones.1.Number">
+		<input type="text" name="Phones.2.Label">
+		<input type="text" name="Phones.2.Number">
+	</form>
+
+Notice that only for slices of structs the slice index is required.
+This is needed for disambiguation: if the nested struct also had a slice
+field, we could not translate multiple values to it if we did not use an
+index for the parent struct.
+
+There's also the possibility to create a custom type that implements the
+TextUnmarshaler interface, and in this case there's no need to register
+a converter, like:
+
+	type Person struct {
+		Emails []Email
+	}
+
+	type Email struct {
+		*mail.Address
+	}
+
+	func (e *Email) UnmarshalText(text []byte) (err error) {
+		e.Address, err = mail.ParseAddress(string(text))
+		return
+	}
+
+...an HTML form that accepts three Email values would look like this:
+
+	<form>
+		<input type="email" name="Emails.0">
+		<input type="email" name="Emails.1">
+		<input type="email" name="Emails.2">
+	</form>
+*/ +package schema diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE new file mode 100644 index 00000000..9171c972 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go new file mode 100644 index 00000000..a353e185 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client.go @@ -0,0 +1,350 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "bytes" + "crypto/tls" + "encoding/base64" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. +// +// Deprecated: Use Dialer instead. +func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + d := Dialer{ + ReadBufferSize: readBufSize, + WriteBufferSize: writeBufSize, + NetDial: func(net, addr string) (net.Conn, error) { + return netConn, nil + }, + } + return d.Dial(u.String(), requestHeader) +} + +// A Dialer contains options for connecting to WebSocket server. +type Dialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, net.Dial is used. 
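+	// NetDial can also be used to hand an already-established connection to
+	// the handshake, as the deprecated NewClient above does.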
+ NetDial func(network, addr string) (net.Conn, error) + + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // TLSClientConfig specifies the TLS configuration to use with tls.Client. + // If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // Input and output buffer sizes. If the buffer size is zero, then a + // default value of 4096 is used. + ReadBufferSize, WriteBufferSize int + + // Subprotocols specifies the client's requested subprotocols. + Subprotocols []string +} + +var errMalformedURL = errors.New("malformed ws or wss URL") + +// parseURL parses the URL. +// +// This function is a replacement for the standard library url.Parse function. +// In Go 1.4 and earlier, url.Parse loses information from the path. +func parseURL(s string) (*url.URL, error) { + // From the RFC: + // + // ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ] + // wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ] + + var u url.URL + switch { + case strings.HasPrefix(s, "ws://"): + u.Scheme = "ws" + s = s[len("ws://"):] + case strings.HasPrefix(s, "wss://"): + u.Scheme = "wss" + s = s[len("wss://"):] + default: + return nil, errMalformedURL + } + + if i := strings.Index(s, "?"); i >= 0 { + u.RawQuery = s[i+1:] + s = s[:i] + } + + if i := strings.Index(s, "/"); i >= 0 { + u.Opaque = s[i:] + s = s[:i] + } else { + u.Opaque = "/" + } + + u.Host = s + + if strings.Contains(u.Host, "@") { + // Don't bother parsing user information because user information is + // not allowed in websocket URIs. + return nil, errMalformedURL + } + + return &u, nil +} + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": + hostPort += ":443" + default: + hostPort += ":80" + } + } + return hostPort, hostNoPort +} + +// DefaultDialer is a dialer with all fields set to the default zero values. +var DefaultDialer = &Dialer{ + Proxy: http.ProxyFromEnvironment, +} + +// Dial creates a new client connection. Use requestHeader to specify the +// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). +// Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etcetera. The response body may not contain the entire response and does not +// need to be closed by the application. 
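+//
+// A minimal sketch of typical use (the URL and header are illustrative):
+//
+//	d := &Dialer{HandshakeTimeout: 10 * time.Second}
+//	header := http.Header{"Origin": {"https://example.com"}}
+//	conn, resp, err := d.Dial("wss://example.com/ws", header)
+//	if err != nil {
+//		// on ErrBadHandshake, resp carries the server's reply
+//		return
+//	}
+//	defer conn.Close()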
+func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + + if d == nil { + d = &Dialer{ + Proxy: http.ProxyFromEnvironment, + } + } + + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + + u, err := parseURL(urlStr) + if err != nil { + return nil, nil, err + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + default: + return nil, nil, errMalformedURL + } + + if u.User != nil { + // User name and password are not allowed in websocket URIs. + return nil, nil, errMalformedURL + } + + req := &http.Request{ + Method: "GET", + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + + // Set the request headers using the capitalization for names and values in + // RFC examples. Although the capitalization shouldn't matter, there are + // servers that depend on it. The Header.Set method is not used because the + // method canonicalizes the header names. + req.Header["Upgrade"] = []string{"websocket"} + req.Header["Connection"] = []string{"Upgrade"} + req.Header["Sec-WebSocket-Key"] = []string{challengeKey} + req.Header["Sec-WebSocket-Version"] = []string{"13"} + if len(d.Subprotocols) > 0 { + req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} + } + for k, vs := range requestHeader { + switch { + case k == "Host": + if len(vs) > 0 { + req.Host = vs[0] + } + case k == "Upgrade" || + k == "Connection" || + k == "Sec-Websocket-Key" || + k == "Sec-Websocket-Version" || + (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): + return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) + default: + req.Header[k] = vs + } + } + + hostPort, hostNoPort := hostPortNoPort(u) + + var proxyURL *url.URL + // Check wether the proxy method has been configured + if d.Proxy != nil { + proxyURL, err = d.Proxy(req) + } + if err != nil { + return nil, nil, err + } + + var targetHostPort string + if proxyURL != nil { + targetHostPort, _ = hostPortNoPort(proxyURL) + } else { + targetHostPort = hostPort + } + + var deadline time.Time + if d.HandshakeTimeout != 0 { + deadline = time.Now().Add(d.HandshakeTimeout) + } + + netDial := d.NetDial + if netDial == nil { + netDialer := &net.Dialer{Deadline: deadline} + netDial = netDialer.Dial + } + + netConn, err := netDial("tcp", targetHostPort) + if err != nil { + return nil, nil, err + } + + defer func() { + if netConn != nil { + netConn.Close() + } + }() + + if err := netConn.SetDeadline(deadline); err != nil { + return nil, nil, err + } + + if proxyURL != nil { + connectHeader := make(http.Header) + if user := proxyURL.User; user != nil { + proxyUser := user.Username() + if proxyPassword, passwordSet := user.Password(); passwordSet { + credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) + connectHeader.Set("Proxy-Authorization", "Basic "+credential) + } + } + connectReq := &http.Request{ + Method: "CONNECT", + URL: &url.URL{Opaque: hostPort}, + Host: hostPort, + Header: connectHeader, + } + + connectReq.Write(netConn) + + // Read response. + // Okay to use and discard buffered reader here, because + // TLS server will not speak until spoken to. 
+ br := bufio.NewReader(netConn) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + return nil, nil, err + } + if resp.StatusCode != 200 { + f := strings.SplitN(resp.Status, " ", 2) + return nil, nil, errors.New(f[1]) + } + } + + if u.Scheme == "https" { + cfg := d.TLSClientConfig + if cfg == nil { + cfg = &tls.Config{ServerName: hostNoPort} + } else if cfg.ServerName == "" { + shallowCopy := *cfg + cfg = &shallowCopy + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(netConn, cfg) + netConn = tlsConn + if err := tlsConn.Handshake(); err != nil { + return nil, nil, err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return nil, nil, err + } + } + } + + conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize) + + if err := req.Write(netConn); err != nil { + return nil, nil, err + } + + resp, err := http.ReadResponse(conn.br, req) + if err != nil { + return nil, nil, err + } + if resp.StatusCode != 101 || + !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || + !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. + buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + return nil, resp, ErrBadHandshake + } + + resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + + netConn.SetDeadline(time.Time{}) + netConn = nil // to avoid close in defer. + return conn, resp, nil +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go new file mode 100644 index 00000000..eff26c63 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -0,0 +1,915 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "strconv" + "time" +) + +const ( + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + finalBit = 1 << 7 + maskBit = 1 << 7 + writeWait = time.Second + + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 + + continuationFrame = 0 + noFrame = -1 +) + +// Close codes defined in RFC 6455, section 11.7. +const ( + CloseNormalClosure = 1000 + CloseGoingAway = 1001 + CloseProtocolError = 1002 + CloseUnsupportedData = 1003 + CloseNoStatusReceived = 1005 + CloseAbnormalClosure = 1006 + CloseInvalidFramePayloadData = 1007 + ClosePolicyViolation = 1008 + CloseMessageTooBig = 1009 + CloseMandatoryExtension = 1010 + CloseInternalServerErr = 1011 + CloseTLSHandshake = 1015 +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. + TextMessage = 1 + + // BinaryMessage denotes a binary data message. + BinaryMessage = 2 + + // CloseMessage denotes a close control message. The optional message + // payload contains a numeric code and text. Use the FormatCloseMessage + // function to format a close message payload. 
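+	// For example, a polite shutdown can send:
+	//	conn.WriteControl(CloseMessage,
+	//		FormatCloseMessage(CloseNormalClosure, ""),
+	//		time.Now().Add(time.Second))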
+	CloseMessage = 8
+
+	// PingMessage denotes a ping control message. The optional message payload
+	// is UTF-8 encoded text.
+	PingMessage = 9
+
+	// PongMessage denotes a pong control message. The optional message payload
+	// is UTF-8 encoded text.
+	PongMessage = 10
+)
+
+// ErrCloseSent is returned when the application writes a message to the
+// connection after sending a close message.
+var ErrCloseSent = errors.New("websocket: close sent")
+
+// ErrReadLimit is returned when reading a message that is larger than the
+// read limit set for the connection.
+var ErrReadLimit = errors.New("websocket: read limit exceeded")
+
+// netError satisfies the net Error interface.
+type netError struct {
+	msg       string
+	temporary bool
+	timeout   bool
+}
+
+func (e *netError) Error() string   { return e.msg }
+func (e *netError) Temporary() bool { return e.temporary }
+func (e *netError) Timeout() bool   { return e.timeout }
+
+// CloseError represents a close frame.
+type CloseError struct {
+
+	// Code is defined in RFC 6455, section 11.7.
+	Code int
+
+	// Text is the optional text payload.
+	Text string
+}
+
+func (e *CloseError) Error() string {
+	s := []byte("websocket: close ")
+	s = strconv.AppendInt(s, int64(e.Code), 10)
+	switch e.Code {
+	case CloseNormalClosure:
+		s = append(s, " (normal)"...)
+	case CloseGoingAway:
+		s = append(s, " (going away)"...)
+	case CloseProtocolError:
+		s = append(s, " (protocol error)"...)
+	case CloseUnsupportedData:
+		s = append(s, " (unsupported data)"...)
+	case CloseNoStatusReceived:
+		s = append(s, " (no status)"...)
+	case CloseAbnormalClosure:
+		s = append(s, " (abnormal closure)"...)
+	case CloseInvalidFramePayloadData:
+		s = append(s, " (invalid payload data)"...)
+	case ClosePolicyViolation:
+		s = append(s, " (policy violation)"...)
+	case CloseMessageTooBig:
+		s = append(s, " (message too big)"...)
+	case CloseMandatoryExtension:
+		s = append(s, " (mandatory extension missing)"...)
+	case CloseInternalServerErr:
+		s = append(s, " (internal server error)"...)
+	case CloseTLSHandshake:
+		s = append(s, " (TLS handshake error)"...)
+	}
+	if e.Text != "" {
+		s = append(s, ": "...)
+		s = append(s, e.Text...)
+	}
+	return string(s)
+}
+
+// IsCloseError returns a boolean indicating whether the error is a *CloseError
+// with one of the specified codes.
+func IsCloseError(err error, codes ...int) bool {
+	if e, ok := err.(*CloseError); ok {
+		for _, code := range codes {
+			if e.Code == code {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// IsUnexpectedCloseError returns a boolean indicating whether the error is a
+// *CloseError with a code not in the list of expected codes.
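+//
+// For example, a read loop can treat normal closure as expected:
+//
+//	_, _, err := conn.ReadMessage()
+//	if IsUnexpectedCloseError(err, CloseNormalClosure, CloseGoingAway) {
+//		log.Printf("unexpected close: %v", err)
+//	}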
+func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range expectedCodes { + if e.Code == code { + return false + } + } + return true + } + return false +} + +var ( + errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} + errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} + errBadWriteOpCode = errors.New("websocket: bad write message type") + errWriteClosed = errors.New("websocket: write closed") + errInvalidControlFrame = errors.New("websocket: invalid control frame") +) + +func hideTempErr(err error) error { + if e, ok := err.(net.Error); ok && e.Temporary() { + err = &netError{msg: e.Error(), timeout: e.Timeout()} + } + return err +} + +func isControl(frameType int) bool { + return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage +} + +func isData(frameType int) bool { + return frameType == TextMessage || frameType == BinaryMessage +} + +func maskBytes(key [4]byte, pos int, b []byte) int { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 +} + +func newMaskKey() [4]byte { + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} +} + +// Conn represents a WebSocket connection. +type Conn struct { + conn net.Conn + isServer bool + subprotocol string + + // Write fields + mu chan bool // used as mutex to protect write to conn and closeSent + closeSent bool // true if close message was sent + + // Message writer fields. + writeErr error + writeBuf []byte // frame is constructed in this buffer. + writePos int // end of data in writeBuf. + writeFrameType int // type of the current frame. + writeSeq int // incremented to invalidate message writers. + writeDeadline time.Time + isWriting bool // for best-effort concurrent write detection + + // Read fields + readErr error + br *bufio.Reader + readRemaining int64 // bytes remaining in current frame. + readFinal bool // true the current message has more frames. + readSeq int // incremented to invalidate message readers. + readLength int64 // Message size. + readLimit int64 // Maximum message size. + readMaskPos int + readMaskKey [4]byte + handlePong func(string) error + handlePing func(string) error + readErrCount int +} + +func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn { + mu := make(chan bool, 1) + mu <- true + + if readBufferSize == 0 { + readBufferSize = defaultReadBufferSize + } + if writeBufferSize == 0 { + writeBufferSize = defaultWriteBufferSize + } + + c := &Conn{ + isServer: isServer, + br: bufio.NewReaderSize(conn, readBufferSize), + conn: conn, + mu: mu, + readFinal: true, + writeBuf: make([]byte, writeBufferSize+maxFrameHeaderSize), + writeFrameType: noFrame, + writePos: maxFrameHeaderSize, + } + c.SetPingHandler(nil) + c.SetPongHandler(nil) + return c +} + +// Subprotocol returns the negotiated protocol for the connection. +func (c *Conn) Subprotocol() string { + return c.subprotocol +} + +// Close closes the underlying network connection without sending or waiting for a close frame. +func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. 
+func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// Write methods + +func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error { + <-c.mu + defer func() { c.mu <- true }() + + if c.closeSent { + return ErrCloseSent + } else if frameType == CloseMessage { + c.closeSent = true + } + + c.conn.SetWriteDeadline(deadline) + for _, buf := range bufs { + if len(buf) > 0 { + n, err := c.conn.Write(buf) + if n != len(buf) { + // Close on partial write. + c.conn.Close() + } + if err != nil { + return err + } + } + } + return nil +} + +// WriteControl writes a control message with the given deadline. The allowed +// message types are CloseMessage, PingMessage and PongMessage. +func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { + if !isControl(messageType) { + return errBadWriteOpCode + } + if len(data) > maxControlFramePayloadSize { + return errInvalidControlFrame + } + + b0 := byte(messageType) | finalBit + b1 := byte(len(data)) + if !c.isServer { + b1 |= maskBit + } + + buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) + buf = append(buf, b0, b1) + + if c.isServer { + buf = append(buf, data...) + } else { + key := newMaskKey() + buf = append(buf, key[:]...) + buf = append(buf, data...) + maskBytes(key, 0, buf[6:]) + } + + d := time.Hour * 1000 + if !deadline.IsZero() { + d = deadline.Sub(time.Now()) + if d < 0 { + return errWriteTimeout + } + } + + timer := time.NewTimer(d) + select { + case <-c.mu: + timer.Stop() + case <-timer.C: + return errWriteTimeout + } + defer func() { c.mu <- true }() + + if c.closeSent { + return ErrCloseSent + } else if messageType == CloseMessage { + c.closeSent = true + } + + c.conn.SetWriteDeadline(deadline) + n, err := c.conn.Write(buf) + if n != 0 && n != len(buf) { + c.conn.Close() + } + return hideTempErr(err) +} + +// NextWriter returns a writer for the next message to send. The writer's +// Close method flushes the complete message to the network. +// +// There can be at most one open writer on a connection. NextWriter closes the +// previous writer if the application has not already done so. +func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + if c.writeErr != nil { + return nil, c.writeErr + } + + if c.writeFrameType != noFrame { + if err := c.flushFrame(true, nil); err != nil { + return nil, err + } + } + + if !isControl(messageType) && !isData(messageType) { + return nil, errBadWriteOpCode + } + + c.writeFrameType = messageType + return messageWriter{c, c.writeSeq}, nil +} + +func (c *Conn) flushFrame(final bool, extra []byte) error { + length := c.writePos - maxFrameHeaderSize + len(extra) + + // Check for invalid control frames. + if isControl(c.writeFrameType) && + (!final || length > maxControlFramePayloadSize) { + c.writeSeq++ + c.writeFrameType = noFrame + c.writePos = maxFrameHeaderSize + return errInvalidControlFrame + } + + b0 := byte(c.writeFrameType) + if final { + b0 |= finalBit + } + b1 := byte(0) + if !c.isServer { + b1 |= maskBit + } + + // Assume that the frame starts at beginning of c.writeBuf. + framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. 
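+		// writeBuf reserves maxFrameHeaderSize = 14 bytes up front (2 header +
+		// 8 extended length + 4 mask); a server never masks, so its frame may
+		// start 4 bytes further in. Each length case below then places the
+		// header so the payload at maxFrameHeaderSize lines up exactly.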
+ framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:c.writePos]) + if len(extra) > 0 { + c.writeErr = errors.New("websocket: internal error, extra used in client mode") + return c.writeErr + } + } + + // Write the buffers to the connection with best-effort detection of + // concurrent writes. See the concurrency section in the package + // documentation for more info. + + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + + c.writeErr = c.write(c.writeFrameType, c.writeDeadline, c.writeBuf[framePos:c.writePos], extra) + + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + + // Setup for next frame. + c.writePos = maxFrameHeaderSize + c.writeFrameType = continuationFrame + if final { + c.writeSeq++ + c.writeFrameType = noFrame + } + return c.writeErr +} + +type messageWriter struct { + c *Conn + seq int +} + +func (w messageWriter) err() error { + c := w.c + if c.writeSeq != w.seq { + return errWriteClosed + } + if c.writeErr != nil { + return c.writeErr + } + return nil +} + +func (w messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.c.writePos + if n <= 0 { + if err := w.c.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.c.writePos + } + if n > max { + n = max + } + return n, nil +} + +func (w messageWriter) write(final bool, p []byte) (int, error) { + if err := w.err(); err != nil { + return 0, err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. + err := w.c.flushFrame(final, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.c.writePos:], p[:n]) + w.c.writePos += n + p = p[n:] + } + return nn, nil +} + +func (w messageWriter) Write(p []byte) (int, error) { + return w.write(false, p) +} + +func (w messageWriter) WriteString(p string) (int, error) { + if err := w.err(); err != nil { + return 0, err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.c.writePos:], p[:n]) + w.c.writePos += n + p = p[n:] + } + return nn, nil +} + +func (w messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if err := w.err(); err != nil { + return 0, err + } + for { + if w.c.writePos == len(w.c.writeBuf) { + err = w.c.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.c.writePos:]) + w.c.writePos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w messageWriter) Close() error { + if err := w.err(); err != nil { + return err + } + return w.c.flushFrame(true, nil) +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. 
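+//
+// For example:
+//
+//	err := conn.WriteMessage(TextMessage, []byte("hello"))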
+func (c *Conn) WriteMessage(messageType int, data []byte) error { + wr, err := c.NextWriter(messageType) + if err != nil { + return err + } + w := wr.(messageWriter) + if _, err := w.write(true, data); err != nil { + return err + } + if c.writeSeq == w.seq { + if err := c.flushFrame(true, nil); err != nil { + return err + } + } + return nil +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +// readFull is like io.ReadFull except that io.EOF is never returned. +func (c *Conn) readFull(p []byte) (err error) { + var n int + for n < len(p) && err == nil { + var nn int + nn, err = c.br.Read(p[n:]) + n += nn + } + if n == len(p) { + err = nil + } else if err == io.EOF { + err = errUnexpectedEOF + } + return +} + +func (c *Conn) advanceFrame() (int, error) { + + // 1. Skip remainder of previous frame. + + if c.readRemaining > 0 { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. Read and parse first two bytes of frame header. + + var b [8]byte + if err := c.readFull(b[:2]); err != nil { + return noFrame, err + } + + final := b[0]&finalBit != 0 + frameType := int(b[0] & 0xf) + reserved := int((b[0] >> 4) & 0x7) + mask := b[1]&maskBit != 0 + c.readRemaining = int64(b[1] & 0x7f) + + if reserved != 0 { + return noFrame, c.handleProtocolError("unexpected reserved bits " + strconv.Itoa(reserved)) + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + return noFrame, c.handleProtocolError("control frame length > 125") + } + if !final { + return noFrame, c.handleProtocolError("control frame not final") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + return noFrame, c.handleProtocolError("message start before final message frame") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + return noFrame, c.handleProtocolError("continuation after final message frame") + } + c.readFinal = final + default: + return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) + } + + // 3. Read and parse frame length. + + switch c.readRemaining { + case 126: + if err := c.readFull(b[:2]); err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint16(b[:2])) + case 127: + if err := c.readFull(b[:8]); err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint64(b[:8])) + } + + // 4. Handle frame masking. + + if mask != c.isServer { + return noFrame, c.handleProtocolError("incorrect mask flag") + } + + if mask { + c.readMaskPos = 0 + if err := c.readFull(c.readMaskKey[:]); err != nil { + return noFrame, err + } + } + + // 5. For text and binary messages, enforce read limit and return. + + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + if c.readLimit > 0 && c.readLength > c.readLimit { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. 
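+	// (Bounded allocation: control frames longer than
+	// maxControlFramePayloadSize bytes were rejected above.)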
+
+	var payload []byte
+	if c.readRemaining > 0 {
+		payload = make([]byte, c.readRemaining)
+		c.readRemaining = 0
+		if err := c.readFull(payload); err != nil {
+			return noFrame, err
+		}
+		if c.isServer {
+			maskBytes(c.readMaskKey, 0, payload)
+		}
+	}
+
+	// 7. Process control frame payload.
+
+	switch frameType {
+	case PongMessage:
+		if err := c.handlePong(string(payload)); err != nil {
+			return noFrame, err
+		}
+	case PingMessage:
+		if err := c.handlePing(string(payload)); err != nil {
+			return noFrame, err
+		}
+	case CloseMessage:
+		echoMessage := []byte{}
+		closeCode := CloseNoStatusReceived
+		closeText := ""
+		if len(payload) >= 2 {
+			echoMessage = payload[:2]
+			closeCode = int(binary.BigEndian.Uint16(payload))
+			closeText = string(payload[2:])
+		}
+		c.WriteControl(CloseMessage, echoMessage, time.Now().Add(writeWait))
+		return noFrame, &CloseError{Code: closeCode, Text: closeText}
+	}
+
+	return frameType, nil
+}
+
+func (c *Conn) handleProtocolError(message string) error {
+	c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait))
+	return errors.New("websocket: " + message)
+}
+
+// NextReader returns the next data message received from the peer. The
+// returned messageType is either TextMessage or BinaryMessage.
+//
+// There can be at most one open reader on a connection. NextReader discards
+// the previous message if the application has not already consumed it.
+//
+// Applications must break out of their read loop when this method
+// returns a non-nil error value. Errors returned from this method are
+// permanent. Once this method returns a non-nil error, all subsequent calls to
+// this method return the same error.
+func (c *Conn) NextReader() (messageType int, r io.Reader, err error) {
+
+	c.readSeq++
+	c.readLength = 0
+
+	for c.readErr == nil {
+		frameType, err := c.advanceFrame()
+		if err != nil {
+			c.readErr = hideTempErr(err)
+			break
+		}
+		if frameType == TextMessage || frameType == BinaryMessage {
+			return frameType, messageReader{c, c.readSeq}, nil
+		}
+	}
+
+	// Applications that do not handle the error returned from this method
+	// spin in a tight loop on connection failure. To help application
+	// developers detect this error, panic on repeated reads to the failed
+	// connection.
+	c.readErrCount++
+	if c.readErrCount >= 1000 {
+		panic("repeated read on failed websocket connection")
+	}
+
+	return noFrame, nil, c.readErr
+}
+
+type messageReader struct {
+	c   *Conn
+	seq int
+}
+
+func (r messageReader) Read(b []byte) (int, error) {
+
+	if r.seq != r.c.readSeq {
+		return 0, io.EOF
+	}
+
+	for r.c.readErr == nil {
+
+		if r.c.readRemaining > 0 {
+			if int64(len(b)) > r.c.readRemaining {
+				b = b[:r.c.readRemaining]
+			}
+			n, err := r.c.br.Read(b)
+			r.c.readErr = hideTempErr(err)
+			if r.c.isServer {
+				r.c.readMaskPos = maskBytes(r.c.readMaskKey, r.c.readMaskPos, b[:n])
+			}
+			r.c.readRemaining -= int64(n)
+			return n, r.c.readErr
+		}
+
+		if r.c.readFinal {
+			r.c.readSeq++
+			return 0, io.EOF
+		}
+
+		frameType, err := r.c.advanceFrame()
+		switch {
+		case err != nil:
+			r.c.readErr = hideTempErr(err)
+		case frameType == TextMessage || frameType == BinaryMessage:
+			r.c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader")
+		}
+	}
+
+	err := r.c.readErr
+	if err == io.EOF && r.seq == r.c.readSeq {
+		err = errUnexpectedEOF
+	}
+	return 0, err
+}
+
+// ReadMessage is a helper method for getting a reader using NextReader and
+// reading from that reader to a buffer.
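+//
+// A typical read loop looks like this (a sketch; c is assumed to be an
+// established *Conn, and any error is treated as permanent, as described
+// for NextReader):
+//
+//  for {
+//      mt, p, err := c.ReadMessage()
+//      if err != nil {
+//          break
+//      }
+//      log.Printf("recv %d: %s", mt, p)
+//  }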
+func (c *Conn) ReadMessage() (messageType int, p []byte, err error) {
+	var r io.Reader
+	messageType, r, err = c.NextReader()
+	if err != nil {
+		return messageType, nil, err
+	}
+	p, err = ioutil.ReadAll(r)
+	return messageType, p, err
+}
+
+// SetReadDeadline sets the read deadline on the underlying network connection.
+// After a read has timed out, the websocket connection state is corrupt and
+// all future reads will return an error. A zero value for t means reads will
+// not time out.
+func (c *Conn) SetReadDeadline(t time.Time) error {
+	return c.conn.SetReadDeadline(t)
+}
+
+// SetReadLimit sets the maximum size for a message read from the peer. If a
+// message exceeds the limit, the connection sends a close frame to the peer
+// and returns ErrReadLimit to the application.
+func (c *Conn) SetReadLimit(limit int64) {
+	c.readLimit = limit
+}
+
+// SetPingHandler sets the handler for ping messages received from the peer.
+// The appData argument to h is the PING frame application data. The default
+// ping handler sends a pong to the peer.
+func (c *Conn) SetPingHandler(h func(appData string) error) {
+	if h == nil {
+		h = func(message string) error {
+			err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait))
+			if err == ErrCloseSent {
+				return nil
+			} else if e, ok := err.(net.Error); ok && e.Temporary() {
+				return nil
+			}
+			return err
+		}
+	}
+	c.handlePing = h
+}
+
+// SetPongHandler sets the handler for pong messages received from the peer.
+// The appData argument to h is the PONG frame application data. The default
+// pong handler does nothing.
+func (c *Conn) SetPongHandler(h func(appData string) error) {
+	if h == nil {
+		h = func(string) error { return nil }
+	}
+	c.handlePong = h
+}
+
+// UnderlyingConn returns the internal net.Conn. This can be used to make
+// further modifications to connection-specific flags.
+func (c *Conn) UnderlyingConn() net.Conn {
+	return c.conn
+}
+
+// FormatCloseMessage formats closeCode and text as a WebSocket close message.
+func FormatCloseMessage(closeCode int, text string) []byte {
+	buf := make([]byte, 2+len(text))
+	binary.BigEndian.PutUint16(buf, uint16(closeCode))
+	copy(buf[2:], text)
+	return buf
+}
diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go
new file mode 100644
index 00000000..c901a7a9
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/doc.go
@@ -0,0 +1,152 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements the WebSocket protocol defined in RFC 6455.
+//
+// Overview
+//
+// The Conn type represents a WebSocket connection. A server application uses
+// the Upgrade function from an Upgrader object with an HTTP request handler
+// to get a pointer to a Conn:
+//
+//  var upgrader = websocket.Upgrader{
+//      ReadBufferSize:  1024,
+//      WriteBufferSize: 1024,
+//  }
+//
+//  func handler(w http.ResponseWriter, r *http.Request) {
+//      conn, err := upgrader.Upgrade(w, r, nil)
+//      if err != nil {
+//          log.Println(err)
+//          return
+//      }
+//      ... Use conn to send and receive messages.
+//  }
+//
+// Call the connection's WriteMessage and ReadMessage methods to send and
+// receive messages as a slice of bytes. This snippet shows how to echo
+// messages using these methods:
+//
+//  for {
+//      messageType, p, err := conn.ReadMessage()
+//      if err != nil {
+//          return
+//      }
+//      if err = conn.WriteMessage(messageType, p); err != nil {
+//          return
+//      }
+//  }
+//
+// In the snippet above, p is a []byte and messageType is an int with value
+// websocket.BinaryMessage or websocket.TextMessage.
+//
+// An application can also send and receive messages using the io.WriteCloser
+// and io.Reader interfaces. To send a message, call the connection NextWriter
+// method to get an io.WriteCloser, write the message to the writer and close
+// the writer when done. To receive a message, call the connection NextReader
+// method to get an io.Reader and read until io.EOF is returned. This snippet
+// shows how to echo messages using the NextWriter and NextReader methods:
+//
+//  for {
+//      messageType, r, err := conn.NextReader()
+//      if err != nil {
+//          return
+//      }
+//      w, err := conn.NextWriter(messageType)
+//      if err != nil {
+//          return
+//      }
+//      if _, err := io.Copy(w, r); err != nil {
+//          return
+//      }
+//      if err := w.Close(); err != nil {
+//          return
+//      }
+//  }
+//
+// Data Messages
+//
+// The WebSocket protocol distinguishes between text and binary data messages.
+// Text messages are interpreted as UTF-8 encoded text. The interpretation of
+// binary messages is left to the application.
+//
+// This package uses the TextMessage and BinaryMessage integer constants to
+// identify the two data message types. The ReadMessage and NextReader methods
+// return the type of the received message. The messageType argument to the
+// WriteMessage and NextWriter methods specifies the type of a sent message.
+//
+// It is the application's responsibility to ensure that text messages are
+// valid UTF-8 encoded text.
+//
+// Control Messages
+//
+// The WebSocket protocol defines three types of control messages: close, ping
+// and pong. Call the connection WriteControl, WriteMessage or NextWriter
+// methods to send a control message to the peer.
+//
+// Connections handle received close messages by sending a close message to the
+// peer and returning a *CloseError from the NextReader, ReadMessage or the
+// message Read method.
+//
+// Connections handle received ping and pong messages by invoking callback
+// functions set with SetPingHandler and SetPongHandler methods. The callback
+// functions are called from the NextReader, ReadMessage and the message Read
+// methods.
+//
+// The default ping handler sends a pong to the peer. The application's reading
+// goroutine can block for a short time while the handler writes the pong data
+// to the connection.
+//
+// The application must read the connection to process ping, pong and close
+// messages sent from the peer. If the application is not otherwise interested
+// in messages from the peer, then the application should start a goroutine to
+// read and discard messages from the peer. A simple example is:
+//
+//  func readLoop(c *websocket.Conn) {
+//      for {
+//          if _, _, err := c.NextReader(); err != nil {
+//              c.Close()
+//              break
+//          }
+//      }
+//  }
+//
+// Concurrency
+//
+// Connections support one concurrent reader and one concurrent writer.
+// +// Applications are responsible for ensuring that no more than one goroutine +// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, +// WriteJSON) concurrently and that no more than one goroutine calls the read +// methods (NextReader, SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, +// SetPingHandler) concurrently. +// +// The Close and WriteControl methods can be called concurrently with all other +// methods. +// +// Origin Considerations +// +// Web browsers allow Javascript applications to open a WebSocket connection to +// any host. It's up to the server to enforce an origin policy using the Origin +// request header sent by the browser. +// +// The Upgrader calls the function specified in the CheckOrigin field to check +// the origin. If the CheckOrigin function returns false, then the Upgrade +// method fails the WebSocket handshake with HTTP status 403. +// +// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail +// the handshake if the Origin request header is present and not equal to the +// Host request header. +// +// An application can allow connections from any origin by specifying a +// function that always returns true: +// +// var upgrader = websocket.Upgrader{ +// CheckOrigin: func(r *http.Request) bool { return true }, +// } +// +// The deprecated Upgrade function does not enforce an origin policy. It's the +// application's responsibility to check the Origin header before calling +// Upgrade. +package websocket diff --git a/vendor/github.com/gorilla/websocket/examples/autobahn/server.go b/vendor/github.com/gorilla/websocket/examples/autobahn/server.go new file mode 100644 index 00000000..d96ac84d --- /dev/null +++ b/vendor/github.com/gorilla/websocket/examples/autobahn/server.go @@ -0,0 +1,246 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Command server is a test server for the Autobahn WebSockets Test Suite. +package main + +import ( + "errors" + "flag" + "github.com/gorilla/websocket" + "io" + "log" + "net/http" + "time" + "unicode/utf8" +) + +var upgrader = websocket.Upgrader{ + ReadBufferSize: 4096, + WriteBufferSize: 4096, + CheckOrigin: func(r *http.Request) bool { + return true + }, +} + +// echoCopy echoes messages from the client using io.Copy. 
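+// When writerOnly is set, the destination is wrapped in a plain io.Writer so
+// that io.Copy cannot take the connection's ReadFrom fast path and must use
+// the frame writer's Write method instead.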
+func echoCopy(w http.ResponseWriter, r *http.Request, writerOnly bool) { + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + log.Println("Upgrade:", err) + return + } + defer conn.Close() + for { + mt, r, err := conn.NextReader() + if err != nil { + if err != io.EOF { + log.Println("NextReader:", err) + } + return + } + if mt == websocket.TextMessage { + r = &validator{r: r} + } + w, err := conn.NextWriter(mt) + if err != nil { + log.Println("NextWriter:", err) + return + } + if mt == websocket.TextMessage { + r = &validator{r: r} + } + if writerOnly { + _, err = io.Copy(struct{ io.Writer }{w}, r) + } else { + _, err = io.Copy(w, r) + } + if err != nil { + if err == errInvalidUTF8 { + conn.WriteControl(websocket.CloseMessage, + websocket.FormatCloseMessage(websocket.CloseInvalidFramePayloadData, ""), + time.Time{}) + } + log.Println("Copy:", err) + return + } + err = w.Close() + if err != nil { + log.Println("Close:", err) + return + } + } +} + +func echoCopyWriterOnly(w http.ResponseWriter, r *http.Request) { + echoCopy(w, r, true) +} + +func echoCopyFull(w http.ResponseWriter, r *http.Request) { + echoCopy(w, r, false) +} + +// echoReadAll echoes messages from the client by reading the entire message +// with ioutil.ReadAll. +func echoReadAll(w http.ResponseWriter, r *http.Request, writeMessage bool) { + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + log.Println("Upgrade:", err) + return + } + defer conn.Close() + for { + mt, b, err := conn.ReadMessage() + if err != nil { + if err != io.EOF { + log.Println("NextReader:", err) + } + return + } + if mt == websocket.TextMessage { + if !utf8.Valid(b) { + conn.WriteControl(websocket.CloseMessage, + websocket.FormatCloseMessage(websocket.CloseInvalidFramePayloadData, ""), + time.Time{}) + log.Println("ReadAll: invalid utf8") + } + } + if writeMessage { + err = conn.WriteMessage(mt, b) + if err != nil { + log.Println("WriteMessage:", err) + } + } else { + w, err := conn.NextWriter(mt) + if err != nil { + log.Println("NextWriter:", err) + return + } + if _, err := w.Write(b); err != nil { + log.Println("Writer:", err) + return + } + if err := w.Close(); err != nil { + log.Println("Close:", err) + return + } + } + } +} + +func echoReadAllWriter(w http.ResponseWriter, r *http.Request) { + echoReadAll(w, r, false) +} + +func echoReadAllWriteMessage(w http.ResponseWriter, r *http.Request) { + echoReadAll(w, r, true) +} + +func serveHome(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.Error(w, "Not found.", 404) + return + } + if r.Method != "GET" { + http.Error(w, "Method not allowed", 405) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + io.WriteString(w, "Echo Server") +} + +var addr = flag.String("addr", ":9000", "http service address") + +func main() { + flag.Parse() + http.HandleFunc("/", serveHome) + http.HandleFunc("/c", echoCopyWriterOnly) + http.HandleFunc("/f", echoCopyFull) + http.HandleFunc("/r", echoReadAllWriter) + http.HandleFunc("/m", echoReadAllWriteMessage) + err := http.ListenAndServe(*addr, nil) + if err != nil { + log.Fatal("ListenAndServe: ", err) + } +} + +type validator struct { + state int + x rune + r io.Reader +} + +var errInvalidUTF8 = errors.New("invalid utf8") + +func (r *validator) Read(p []byte) (int, error) { + n, err := r.r.Read(p) + state := r.state + x := r.x + for _, b := range p[:n] { + state, x = decode(state, x, b) + if state == utf8Reject { + break + } + } + r.state = state + r.x = x + if state == utf8Reject || (err == io.EOF && 
state != utf8Accept) { + return n, errInvalidUTF8 + } + return n, err +} + +// UTF-8 decoder from http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ +// +// Copyright (c) 2008-2009 Bjoern Hoehrmann +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to +// deal in the Software without restriction, including without limitation the +// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +// sell copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +// IN THE SOFTWARE. +var utf8d = [...]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 00..1f + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20..3f + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 40..5f + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 60..7f + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, // 80..9f + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // a0..bf + 8, 8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // c0..df + 0xa, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3, 0x3, // e0..ef + 0xb, 0x6, 0x6, 0x6, 0x5, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, // f0..ff + 0x0, 0x1, 0x2, 0x3, 0x5, 0x8, 0x7, 0x1, 0x1, 0x1, 0x4, 0x6, 0x1, 0x1, 0x1, 0x1, // s0..s0 + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, // s1..s2 + 1, 2, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, // s3..s4 + 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, // s5..s6 + 1, 3, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // s7..s8 +} + +const ( + utf8Accept = 0 + utf8Reject = 1 +) + +func decode(state int, x rune, b byte) (int, rune) { + t := utf8d[b] + if state != utf8Accept { + x = rune(b&0x3f) | (x << 6) + } else { + x = rune((0xff >> t) & b) + } + state = int(utf8d[256+state*16+int(t)]) + return state, x +} diff --git a/vendor/github.com/gorilla/websocket/examples/chat/conn.go b/vendor/github.com/gorilla/websocket/examples/chat/conn.go new file mode 100644 index 00000000..40fd38c2 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/examples/chat/conn.go @@ -0,0 +1,105 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package main
+
+import (
+	"github.com/gorilla/websocket"
+	"log"
+	"net/http"
+	"time"
+)
+
+const (
+	// Time allowed to write a message to the peer.
+	writeWait = 10 * time.Second
+
+	// Time allowed to read the next pong message from the peer.
+	pongWait = 60 * time.Second
+
+	// Send pings to peer with this period. Must be less than pongWait.
+	pingPeriod = (pongWait * 9) / 10
+
+	// Maximum message size allowed from peer.
+	maxMessageSize = 512
+)
+
+var upgrader = websocket.Upgrader{
+	ReadBufferSize:  1024,
+	WriteBufferSize: 1024,
+}
+
+// connection is a middleman between the websocket connection and the hub.
+type connection struct {
+	// The websocket connection.
+	ws *websocket.Conn
+
+	// Buffered channel of outbound messages.
+	send chan []byte
+}
+
+// readPump pumps messages from the websocket connection to the hub.
+func (c *connection) readPump() {
+	defer func() {
+		h.unregister <- c
+		c.ws.Close()
+	}()
+	c.ws.SetReadLimit(maxMessageSize)
+	c.ws.SetReadDeadline(time.Now().Add(pongWait))
+	c.ws.SetPongHandler(func(string) error { c.ws.SetReadDeadline(time.Now().Add(pongWait)); return nil })
+	for {
+		_, message, err := c.ws.ReadMessage()
+		if err != nil {
+			if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway) {
+				log.Printf("error: %v", err)
+			}
+			break
+		}
+		h.broadcast <- message
+	}
+}
+
+// write writes a message with the given message type and payload.
+func (c *connection) write(mt int, payload []byte) error {
+	c.ws.SetWriteDeadline(time.Now().Add(writeWait))
+	return c.ws.WriteMessage(mt, payload)
+}
+
+// writePump pumps messages from the hub to the websocket connection.
+func (c *connection) writePump() {
+	ticker := time.NewTicker(pingPeriod)
+	defer func() {
+		ticker.Stop()
+		c.ws.Close()
+	}()
+	for {
+		select {
+		case message, ok := <-c.send:
+			if !ok {
+				c.write(websocket.CloseMessage, []byte{})
+				return
+			}
+			if err := c.write(websocket.TextMessage, message); err != nil {
+				return
+			}
+		case <-ticker.C:
+			if err := c.write(websocket.PingMessage, []byte{}); err != nil {
+				return
+			}
+		}
+	}
+}
+
+// serveWs handles websocket requests from the peer.
+func serveWs(w http.ResponseWriter, r *http.Request) {
+	ws, err := upgrader.Upgrade(w, r, nil)
+	if err != nil {
+		log.Println(err)
+		return
+	}
+	c := &connection{send: make(chan []byte, 256), ws: ws}
+	h.register <- c
+	go c.writePump()
+	c.readPump()
+}
diff --git a/vendor/github.com/gorilla/websocket/examples/chat/hub.go b/vendor/github.com/gorilla/websocket/examples/chat/hub.go
new file mode 100644
index 00000000..449ba753
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/examples/chat/hub.go
@@ -0,0 +1,51 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// hub maintains the set of active connections and broadcasts messages to the
+// connections.
+type hub struct {
+	// Registered connections.
+	connections map[*connection]bool
+
+	// Inbound messages from the connections.
+	broadcast chan []byte
+
+	// Register requests from the connections.
+	register chan *connection
+
+	// Unregister requests from connections.
+ unregister chan *connection +} + +var h = hub{ + broadcast: make(chan []byte), + register: make(chan *connection), + unregister: make(chan *connection), + connections: make(map[*connection]bool), +} + +func (h *hub) run() { + for { + select { + case c := <-h.register: + h.connections[c] = true + case c := <-h.unregister: + if _, ok := h.connections[c]; ok { + delete(h.connections, c) + close(c.send) + } + case m := <-h.broadcast: + for c := range h.connections { + select { + case c.send <- m: + default: + close(c.send) + delete(h.connections, c) + } + } + } + } +} diff --git a/vendor/github.com/gorilla/websocket/examples/chat/main.go b/vendor/github.com/gorilla/websocket/examples/chat/main.go new file mode 100644 index 00000000..3c4448d7 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/examples/chat/main.go @@ -0,0 +1,39 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "flag" + "log" + "net/http" + "text/template" +) + +var addr = flag.String("addr", ":8080", "http service address") +var homeTempl = template.Must(template.ParseFiles("home.html")) + +func serveHome(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.Error(w, "Not found", 404) + return + } + if r.Method != "GET" { + http.Error(w, "Method not allowed", 405) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + homeTempl.Execute(w, r.Host) +} + +func main() { + flag.Parse() + go h.run() + http.HandleFunc("/", serveHome) + http.HandleFunc("/ws", serveWs) + err := http.ListenAndServe(*addr, nil) + if err != nil { + log.Fatal("ListenAndServe: ", err) + } +} diff --git a/vendor/github.com/gorilla/websocket/examples/command/main.go b/vendor/github.com/gorilla/websocket/examples/command/main.go new file mode 100644 index 00000000..f3f022ed --- /dev/null +++ b/vendor/github.com/gorilla/websocket/examples/command/main.go @@ -0,0 +1,188 @@ +// Copyright 2015 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bufio" + "flag" + "io" + "log" + "net/http" + "os" + "os/exec" + "text/template" + "time" + + "github.com/gorilla/websocket" +) + +var ( + addr = flag.String("addr", "127.0.0.1:8080", "http service address") + cmdPath string + homeTempl = template.Must(template.ParseFiles("home.html")) +) + +const ( + // Time allowed to write a message to the peer. + writeWait = 10 * time.Second + + // Maximum message size allowed from peer. + maxMessageSize = 8192 + + // Time allowed to read the next pong message from the peer. + pongWait = 60 * time.Second + + // Send pings to peer with this period. Must be less than pongWait. 
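+	// Nine-tenths of pongWait leaves roughly one ping's worth of slack
+	// before the read deadline expires.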
+ pingPeriod = (pongWait * 9) / 10 +) + +func pumpStdin(ws *websocket.Conn, w io.Writer) { + defer ws.Close() + ws.SetReadLimit(maxMessageSize) + ws.SetReadDeadline(time.Now().Add(pongWait)) + ws.SetPongHandler(func(string) error { ws.SetReadDeadline(time.Now().Add(pongWait)); return nil }) + for { + _, message, err := ws.ReadMessage() + if err != nil { + break + } + message = append(message, '\n') + if _, err := w.Write(message); err != nil { + break + } + } +} + +func pumpStdout(ws *websocket.Conn, r io.Reader, done chan struct{}) { + defer func() { + ws.Close() + close(done) + }() + s := bufio.NewScanner(r) + for s.Scan() { + ws.SetWriteDeadline(time.Now().Add(writeWait)) + if err := ws.WriteMessage(websocket.TextMessage, s.Bytes()); err != nil { + break + } + } + if s.Err() != nil { + log.Println("scan:", s.Err()) + } +} + +func ping(ws *websocket.Conn, done chan struct{}) { + ticker := time.NewTicker(pingPeriod) + defer ticker.Stop() + for { + select { + case <-ticker.C: + if err := ws.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(writeWait)); err != nil { + log.Println("ping:", err) + } + case <-done: + return + } + } +} + +func internalError(ws *websocket.Conn, msg string, err error) { + log.Println(msg, err) + ws.WriteMessage(websocket.TextMessage, []byte("Internal server error.")) +} + +var upgrader = websocket.Upgrader{} + +func serveWs(w http.ResponseWriter, r *http.Request) { + ws, err := upgrader.Upgrade(w, r, nil) + if err != nil { + log.Println("upgrade:", err) + return + } + + defer ws.Close() + + outr, outw, err := os.Pipe() + if err != nil { + internalError(ws, "stdout:", err) + return + } + defer outr.Close() + defer outw.Close() + + inr, inw, err := os.Pipe() + if err != nil { + internalError(ws, "stdin:", err) + return + } + defer inr.Close() + defer inw.Close() + + proc, err := os.StartProcess(cmdPath, flag.Args(), &os.ProcAttr{ + Files: []*os.File{inr, outw, outw}, + }) + if err != nil { + internalError(ws, "start:", err) + return + } + + inr.Close() + outw.Close() + + stdoutDone := make(chan struct{}) + go pumpStdout(ws, outr, stdoutDone) + go ping(ws, stdoutDone) + + pumpStdin(ws, inw) + + // Some commands will exit when stdin is closed. + inw.Close() + + // Other commands need a bonk on the head. + if err := proc.Signal(os.Interrupt); err != nil { + log.Println("inter:", err) + } + + select { + case <-stdoutDone: + case <-time.After(time.Second): + // A bigger bonk on the head. 
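+		// (os.Kill cannot be caught by the child process on POSIX
+		// systems, so this should stop it for good.)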
+ if err := proc.Signal(os.Kill); err != nil { + log.Println("term:", err) + } + <-stdoutDone + } + + if _, err := proc.Wait(); err != nil { + log.Println("wait:", err) + } +} + +func serveHome(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.Error(w, "Not found", 404) + return + } + if r.Method != "GET" { + http.Error(w, "Method not allowed", 405) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + homeTempl.Execute(w, r.Host) +} + +func main() { + flag.Parse() + if len(flag.Args()) < 1 { + log.Fatal("must specify at least one argument") + } + var err error + cmdPath, err = exec.LookPath(flag.Args()[0]) + if err != nil { + log.Fatal(err) + } + http.HandleFunc("/", serveHome) + http.HandleFunc("/ws", serveWs) + log.Fatal(http.ListenAndServe(*addr, nil)) +} diff --git a/vendor/github.com/gorilla/websocket/examples/echo/client.go b/vendor/github.com/gorilla/websocket/examples/echo/client.go new file mode 100644 index 00000000..6578094e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/examples/echo/client.go @@ -0,0 +1,81 @@ +// Copyright 2015 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import ( + "flag" + "log" + "net/url" + "os" + "os/signal" + "time" + + "github.com/gorilla/websocket" +) + +var addr = flag.String("addr", "localhost:8080", "http service address") + +func main() { + flag.Parse() + log.SetFlags(0) + + interrupt := make(chan os.Signal, 1) + signal.Notify(interrupt, os.Interrupt) + + u := url.URL{Scheme: "ws", Host: *addr, Path: "/echo"} + log.Printf("connecting to %s", u.String()) + + c, _, err := websocket.DefaultDialer.Dial(u.String(), nil) + if err != nil { + log.Fatal("dial:", err) + } + defer c.Close() + + done := make(chan struct{}) + + go func() { + defer c.Close() + defer close(done) + for { + _, message, err := c.ReadMessage() + if err != nil { + log.Println("read:", err) + return + } + log.Printf("recv: %s", message) + } + }() + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for { + select { + case t := <-ticker.C: + err := c.WriteMessage(websocket.TextMessage, []byte(t.String())) + if err != nil { + log.Println("write:", err) + return + } + case <-interrupt: + log.Println("interrupt") + // To cleanly close a connection, a client should send a close + // frame and wait for the server to close the connection. + err := c.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) + if err != nil { + log.Println("write close:", err) + return + } + select { + case <-done: + case <-time.After(time.Second): + } + c.Close() + return + } + } +} diff --git a/vendor/github.com/gorilla/websocket/examples/echo/server.go b/vendor/github.com/gorilla/websocket/examples/echo/server.go new file mode 100644 index 00000000..a685b097 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/examples/echo/server.go @@ -0,0 +1,132 @@ +// Copyright 2015 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// +build ignore
+
+package main
+
+import (
+	"flag"
+	"html/template"
+	"log"
+	"net/http"
+
+	"github.com/gorilla/websocket"
+)
+
+var addr = flag.String("addr", "localhost:8080", "http service address")
+
+var upgrader = websocket.Upgrader{} // use default options
+
+func echo(w http.ResponseWriter, r *http.Request) {
+	c, err := upgrader.Upgrade(w, r, nil)
+	if err != nil {
+		log.Print("upgrade:", err)
+		return
+	}
+	defer c.Close()
+	for {
+		mt, message, err := c.ReadMessage()
+		if err != nil {
+			log.Println("read:", err)
+			break
+		}
+		log.Printf("recv: %s", message)
+		err = c.WriteMessage(mt, message)
+		if err != nil {
+			log.Println("write:", err)
+			break
+		}
+	}
+}
+
+func home(w http.ResponseWriter, r *http.Request) {
+	homeTemplate.Execute(w, "ws://"+r.Host+"/echo")
+}
+
+func main() {
+	flag.Parse()
+	log.SetFlags(0)
+	http.HandleFunc("/echo", echo)
+	http.HandleFunc("/", home)
+	log.Fatal(http.ListenAndServe(*addr, nil))
+}
+
+var homeTemplate = template.Must(template.New("").Parse(`
+<!DOCTYPE html>
+<head>
+<meta charset="utf-8">
+<script>
+window.addEventListener("load", function(evt) {
+    var output = document.getElementById("output");
+    var input = document.getElementById("input");
+    var ws;
+    var print = function(message) {
+        var d = document.createElement("div");
+        d.innerHTML = message;
+        output.appendChild(d);
+    };
+    document.getElementById("open").onclick = function(evt) {
+        if (ws) {
+            return false;
+        }
+        ws = new WebSocket("{{.}}");
+        ws.onopen = function(evt) {
+            print("OPEN");
+        }
+        ws.onclose = function(evt) {
+            print("CLOSE");
+            ws = null;
+        }
+        ws.onmessage = function(evt) {
+            print("RESPONSE: " + evt.data);
+        }
+        ws.onerror = function(evt) {
+            print("ERROR: " + evt.data);
+        }
+        return false;
+    };
+    document.getElementById("send").onclick = function(evt) {
+        if (!ws) {
+            return false;
+        }
+        print("SEND: " + input.value);
+        ws.send(input.value);
+        return false;
+    };
+    document.getElementById("close").onclick = function(evt) {
+        if (!ws) {
+            return false;
+        }
+        ws.close();
+        return false;
+    };
+});
+</script>
+</head>
+<body>
+<table>
+<tr><td valign="top" width="50%">
+<p>Click "Open" to create a connection to the server,
+"Send" to send a message to the server and "Close" to close the connection.
+You can change the message and send multiple times.
+<p>
+<form>
+<button id="open">Open</button>
+<button id="close">Close</button>
+<p><input id="input" type="text" value="Hello world!">
+<button id="send">Send</button>
+</form>
+</td><td valign="top" width="50%">
+<div id="output"></div>
+</td></tr></table>
+</body>
+</html>
+
+
+`))
diff --git a/vendor/github.com/gorilla/websocket/examples/filewatch/main.go b/vendor/github.com/gorilla/websocket/examples/filewatch/main.go
new file mode 100644
index 00000000..a2c7b85f
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/examples/filewatch/main.go
@@ -0,0 +1,193 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"flag"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"os"
+	"strconv"
+	"text/template"
+	"time"
+
+	"github.com/gorilla/websocket"
+)
+
+const (
+	// Time allowed to write the file to the client.
+	writeWait = 10 * time.Second
+
+	// Time allowed to read the next pong message from the client.
+	pongWait = 60 * time.Second
+
+	// Send pings to client with this period. Must be less than pongWait.
+	pingPeriod = (pongWait * 9) / 10
+
+	// Poll file for changes with this period.
+	filePeriod = 10 * time.Second
+)
+
+var (
+	addr      = flag.String("addr", ":8080", "http service address")
+	homeTempl = template.Must(template.New("").Parse(homeHTML))
+	filename  string
+	upgrader  = websocket.Upgrader{
+		ReadBufferSize:  1024,
+		WriteBufferSize: 1024,
+	}
+)
+
+func readFileIfModified(lastMod time.Time) ([]byte, time.Time, error) {
+	fi, err := os.Stat(filename)
+	if err != nil {
+		return nil, lastMod, err
+	}
+	if !fi.ModTime().After(lastMod) {
+		return nil, lastMod, nil
+	}
+	p, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, fi.ModTime(), err
+	}
+	return p, fi.ModTime(), nil
+}
+
+func reader(ws *websocket.Conn) {
+	defer ws.Close()
+	ws.SetReadLimit(512)
+	ws.SetReadDeadline(time.Now().Add(pongWait))
+	ws.SetPongHandler(func(string) error { ws.SetReadDeadline(time.Now().Add(pongWait)); return nil })
+	for {
+		_, _, err := ws.ReadMessage()
+		if err != nil {
+			break
+		}
+	}
+}
+
+func writer(ws *websocket.Conn, lastMod time.Time) {
+	lastError := ""
+	pingTicker := time.NewTicker(pingPeriod)
+	fileTicker := time.NewTicker(filePeriod)
+	defer func() {
+		pingTicker.Stop()
+		fileTicker.Stop()
+		ws.Close()
+	}()
+	for {
+		select {
+		case <-fileTicker.C:
+			var p []byte
+			var err error
+
+			p, lastMod, err = readFileIfModified(lastMod)
+
+			if err != nil {
+				if s := err.Error(); s != lastError {
+					lastError = s
+					p = []byte(lastError)
+				}
+			} else {
+				lastError = ""
+			}
+
+			if p != nil {
+				ws.SetWriteDeadline(time.Now().Add(writeWait))
+				if err := ws.WriteMessage(websocket.TextMessage, p); err != nil {
+					return
+				}
+			}
+		case <-pingTicker.C:
+			ws.SetWriteDeadline(time.Now().Add(writeWait))
+			if err := ws.WriteMessage(websocket.PingMessage, []byte{}); err != nil {
+				return
+			}
+		}
+	}
+}
+
+func serveWs(w http.ResponseWriter, r *http.Request) {
+	ws, err := upgrader.Upgrade(w, r, nil)
+	if err != nil {
+		if _, ok := err.(websocket.HandshakeError); !ok {
+			log.Println(err)
+		}
+		return
+	}
+
+	var lastMod time.Time
+	if n, err := strconv.ParseInt(r.FormValue("lastMod"), 16, 64); err == nil {
+		lastMod = time.Unix(0, n)
+	}
+
+	go writer(ws, lastMod)
+	reader(ws)
+}
+
+func serveHome(w http.ResponseWriter, r *http.Request) {
+	if r.URL.Path != "/" {
+		http.Error(w, "Not found", 404)
+		return
+	}
+	if r.Method != "GET" {
+		http.Error(w, "Method not allowed", 405)
+		return
+	}
+	w.Header().Set("Content-Type", "text/html; charset=utf-8")
+	p, lastMod, err := readFileIfModified(time.Time{})
+	if err != nil {
+		p = []byte(err.Error())
+		lastMod = time.Unix(0, 0)
+	}
+	var v = struct {
+		Host    string
+		Data    string
+		LastMod string
+	}{
+		r.Host,
+		string(p),
+		strconv.FormatInt(lastMod.UnixNano(), 16),
+	}
+	homeTempl.Execute(w, &v)
+}
+
+func main() {
+	flag.Parse()
+	if flag.NArg() != 1 {
+		log.Fatal("filename not specified")
+	}
+	filename = flag.Args()[0]
+	http.HandleFunc("/", serveHome)
+	http.HandleFunc("/ws", serveWs)
+	if err := http.ListenAndServe(*addr, nil); err != nil {
+		log.Fatal(err)
+	}
+}
+
+const homeHTML = `<!DOCTYPE html>
+<html lang="en">
+    <head>
+        <title>WebSocket Example</title>
+    </head>
+    <body>
+        <pre id="fileData">{{.Data}}</pre>
+        <script type="text/javascript">
+            (function() {
+                var data = document.getElementById("fileData");
+                var conn = new WebSocket("ws://{{.Host}}/ws?lastMod={{.LastMod}}");
+                conn.onclose = function(evt) {
+                    data.textContent = 'Connection closed';
+                }
+                conn.onmessage = function(evt) {
+                    console.log('file updated');
+                    data.textContent = evt.data;
+                }
+            })();
+        </script>
+    </body>
+</html>
+ + + +` diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go new file mode 100644 index 00000000..4f0e3687 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/json.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON is deprecated, use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v to the connection. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON is deprecated, use c.ReadJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // One value is expected in the message. + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go new file mode 100644 index 00000000..8d7137de --- /dev/null +++ b/vendor/github.com/gorilla/websocket/server.go @@ -0,0 +1,260 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "errors" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// HandshakeError describes an error with the handshake from the peer. +type HandshakeError struct { + message string +} + +func (e HandshakeError) Error() string { return e.message } + +// Upgrader specifies parameters for upgrading an HTTP connection to a +// WebSocket connection. +type Upgrader struct { + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // size is zero, then a default value of 4096 is used. The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // Subprotocols specifies the server's supported protocols in order of + // preference. If this field is set, then the Upgrade method negotiates a + // subprotocol by selecting the first match in this list with a protocol + // requested by the client. + Subprotocols []string + + // Error specifies the function for generating HTTP error responses. If Error + // is nil, then http.Error is used to generate the HTTP response. + Error func(w http.ResponseWriter, r *http.Request, status int, reason error) + + // CheckOrigin returns true if the request Origin header is acceptable. 
If + // CheckOrigin is nil, the host in the Origin header must not be set or + // must match the host of the request. + CheckOrigin func(r *http.Request) bool +} + +func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { + err := HandshakeError{reason} + if u.Error != nil { + u.Error(w, r, status, err) + } else { + http.Error(w, http.StatusText(status), status) + } + return nil, err +} + +// checkSameOrigin returns true if the origin is not set or is equal to the request host. +func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return u.Host == r.Host +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// application negotiated subprotocol (Sec-Websocket-Protocol). +// +// If the upgrade fails, then Upgrade replies to the client with an HTTP error +// response. +func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + if r.Method != "GET" { + return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: method not GET") + } + if values := r.Header["Sec-Websocket-Version"]; len(values) == 0 || values[0] != "13" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: version != 13") + } + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find connection header with token 'upgrade'") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find upgrade header with token 'websocket'") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: origin not allowed") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if challengeKey == "" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: key missing or blank") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + var ( + netConn net.Conn + br *bufio.Reader + err error + ) + + h, ok := w.(http.Hijacker) + if !ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") + } + var rw *bufio.ReadWriter + netConn, rw, err = h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + br = rw.Reader + + if br.Buffered() > 0 { + netConn.Close() + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize) + c.subprotocol = subprotocol + + p := c.writeBuf[:0] + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: 
websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-Websocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. + netConn.SetDeadline(time.Time{}) + + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + } + if _, err = netConn.Write(p); err != nil { + netConn.Close() + return nil, err + } + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Time{}) + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// This function is deprecated, use websocket.Upgrader instead. +// +// The application is responsible for checking the request origin before +// calling Upgrade. An example implementation of the same origin policy is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", 403) +// return +// } +// +// If the endpoint supports subprotocols, then the application is responsible +// for negotiating the protocol used on the connection. Use the Subprotocols() +// function to get the subprotocols requested by the client. Use the +// Sec-Websocket-Protocol response header to specify the subprotocol selected +// by the application. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// negotiated subprotocol (Sec-Websocket-Protocol). +// +// The connection buffers IO to the underlying network connection. The +// readBufSize and writeBufSize parameters specify the size of the buffers to +// use. Messages can be larger than the buffers. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. +func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { + u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} + u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { + // don't return errors to maintain backwards compatibility + } + u.CheckOrigin = func(r *http.Request) bool { + // allow all connections by default + return true + } + return u.Upgrade(w, r, responseHeader) +} + +// Subprotocols returns the subprotocols requested by the client in the +// Sec-Websocket-Protocol header. +func Subprotocols(r *http.Request) []string { + h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) + if h == "" { + return nil + } + protocols := strings.Split(h, ",") + for i := range protocols { + protocols[i] = strings.TrimSpace(protocols[i]) + } + return protocols +} + +// IsWebSocketUpgrade returns true if the client requested upgrade to the +// WebSocket protocol. 
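+// It inspects only the Connection and Upgrade headers, so it is cheap enough
+// to call on every request when serving WebSocket and plain HTTP traffic on
+// the same route.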
+func IsWebSocketUpgrade(r *http.Request) bool {
+	return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
+		tokenListContainsValue(r.Header, "Upgrade", "websocket")
+}
diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go
new file mode 100644
index 00000000..ffdc265e
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/util.go
@@ -0,0 +1,44 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"crypto/rand"
+	"crypto/sha1"
+	"encoding/base64"
+	"io"
+	"net/http"
+	"strings"
+)
+
+// tokenListContainsValue returns true if the 1#token header with the given
+// name contains token.
+func tokenListContainsValue(header http.Header, name string, value string) bool {
+	for _, v := range header[name] {
+		for _, s := range strings.Split(v, ",") {
+			if strings.EqualFold(value, strings.TrimSpace(s)) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
+
+func computeAcceptKey(challengeKey string) string {
+	h := sha1.New()
+	h.Write([]byte(challengeKey))
+	h.Write(keyGUID)
+	return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+func generateChallengeKey() (string, error) {
+	p := make([]byte, 16)
+	if _, err := io.ReadFull(rand.Reader, p); err != nil {
+		return "", err
+	}
+	return base64.StdEncoding.EncodeToString(p), nil
+}
diff --git a/vendor/github.com/jpillora/backoff/backoff.go b/vendor/github.com/jpillora/backoff/backoff.go
new file mode 100644
index 00000000..c8ea03a6
--- /dev/null
+++ b/vendor/github.com/jpillora/backoff/backoff.go
@@ -0,0 +1,69 @@
+package backoff
+
+import (
+	"math"
+	"math/rand"
+	"time"
+)
+
+//Backoff is a time.Duration counter. It starts at Min.
+//After every call to Duration() it is multiplied by Factor.
+//It is capped at Max. It returns to Min on every call to Reset().
+//Used in conjunction with the time package.
+//
+// Backoff is not threadsafe, but the ForAttempt method can be
+// used concurrently if non-zero values for Factor, Max, and Min
+// are set on the Backoff shared among threads.
+type Backoff struct {
+	//Factor is the multiplying factor for each increment step
+	attempts, Factor float64
+	//Jitter eases contention by randomizing backoff steps
+	Jitter bool
+	//Min and Max are the minimum and maximum values of the counter
+	Min, Max time.Duration
+}
+
+//Duration returns the current value of the counter and then
+//multiplies it by Factor
+func (b *Backoff) Duration() time.Duration {
+	d := b.ForAttempt(b.attempts)
+	b.attempts++
+	return d
+}
+
+// ForAttempt returns the duration for a specific attempt. This is useful if
+// you have a large number of independent Backoffs, but don't want to use
+// unnecessary memory storing the Backoff parameters per Backoff. The first
+// attempt should be 0.
+//
+// ForAttempt is threadsafe iff non-zero values for Factor, Max, and Min
+// are set before any calls to ForAttempt are made.
+func (b *Backoff) ForAttempt(attempt float64) time.Duration {
+	//Zero-values are nonsensical, so we use
+	//them to apply defaults
+	if b.Min == 0 {
+		b.Min = 100 * time.Millisecond
+	}
+	if b.Max == 0 {
+		b.Max = 10 * time.Second
+	}
+	if b.Factor == 0 {
+		b.Factor = 2
+	}
+	//calculate this duration
+	dur := float64(b.Min) * math.Pow(b.Factor, attempt)
+	if b.Jitter {
+		dur = rand.Float64()*(dur-float64(b.Min)) + float64(b.Min)
+	}
+	//cap!
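+	//(math.Pow can overflow to +Inf for large attempts; capping here also
+	//keeps the conversion to time.Duration below in range)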
+ if dur > float64(b.Max) { + return b.Max + } + //return as a time.Duration + return time.Duration(dur) +} + +//Resets the current value of the counter back to Min +func (b *Backoff) Reset() { + b.attempts = 0 +} diff --git a/vendor/github.com/mattermost/platform/model/LICENSE.txt b/vendor/github.com/mattermost/platform/model/LICENSE.txt new file mode 100644 index 00000000..b05ccb40 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/LICENSE.txt @@ -0,0 +1,897 @@ +Mattermost Licensing + +SOFTWARE LICENSING + +You are licensed to use compiled versions of the Mattermost platform produced by Mattermost, Inc. under an MIT LICENSE + +- See MIT-COMPILED-LICENSE.md included in compiled versions for details + +You may be licensed to use source code to create compiled versions not produced by Mattermost, Inc. in one of two ways: + +1. Under the Free Software Foundation’s GNU AGPL v.3.0, subject to the exceptions outlined in this policy; or +2. Under a commercial license available from Mattermost, Inc. by contacting commercial@mattermost.com + +You are licensed to use the source code in Admin Tools and Configuration Files (api/templates/, config/, model/, +web/react/utils/, web/static/, web/templates/ and all subdirectories thereof) under the Apache License v2.0. + +We promise that we will not enforce the copyleft provisions in AGPL v3.0 against you if your application (a) does not +link to the Mattermost Platform directly, but exclusively uses the Mattermost Admin Tools and Configuration Files, and +(b) you have not modified, added to or adapted the source code of Mattermost in a way that results in the creation of +a “modified version” or “work based on” Mattermost as these terms are defined in the AGPL v3.0 license. + +MATTERMOST TRADEMARK GUIDELINES + +Your use of the mark Mattermost is subject to Mattermost, Inc's prior written approval and our organization’s Trademark +Standards of Use at http://www.mattermost.org/trademark-standards-of-use/. For trademark approval or any questions +you have about using these trademarks, please email trademark@mattermost.com + +------------------------------------------------------------------------------------------------------------------------------ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------------------------------------------------------------------ + +The software is released under the terms of the GNU Affero General Public +License, version 3. + + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. 
+ + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/vendor/github.com/mattermost/platform/model/access.go b/vendor/github.com/mattermost/platform/model/access.go
new file mode 100644
index 00000000..877b3c4f
--- /dev/null
+++ b/vendor/github.com/mattermost/platform/model/access.go
@@ -0,0 +1,93 @@
+// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+ +package model + +import ( + "encoding/json" + "io" +) + +const ( + ACCESS_TOKEN_GRANT_TYPE = "authorization_code" + ACCESS_TOKEN_TYPE = "bearer" + REFRESH_TOKEN_GRANT_TYPE = "refresh_token" +) + +type AccessData struct { + AuthCode string `json:"auth_code"` + Token string `json:"token"` + RefreshToken string `json:"refresh_token"` + RedirectUri string `json:"redirect_uri"` +} + +type AccessResponse struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int32 `json:"expires_in"` + Scope string `json:"scope"` + RefreshToken string `json:"refresh_token"` +} + +// IsValid validates the AccessData and returns an error if it isn't configured +// correctly. +func (ad *AccessData) IsValid() *AppError { + + if len(ad.AuthCode) == 0 || len(ad.AuthCode) > 128 { + return NewLocAppError("AccessData.IsValid", "model.access.is_valid.auth_code.app_error", nil, "") + } + + if len(ad.Token) != 26 { + return NewLocAppError("AccessData.IsValid", "model.access.is_valid.access_token.app_error", nil, "") + } + + if len(ad.RefreshToken) > 26 { + return NewLocAppError("AccessData.IsValid", "model.access.is_valid.refresh_token.app_error", nil, "") + } + + if len(ad.RedirectUri) > 256 { + return NewLocAppError("AccessData.IsValid", "model.access.is_valid.redirect_uri.app_error", nil, "") + } + + return nil +} + +func (ad *AccessData) ToJson() string { + b, err := json.Marshal(ad) + if err != nil { + return "" + } else { + return string(b) + } +} + +func AccessDataFromJson(data io.Reader) *AccessData { + decoder := json.NewDecoder(data) + var ad AccessData + err := decoder.Decode(&ad) + if err == nil { + return &ad + } else { + return nil + } +} + +func (ar *AccessResponse) ToJson() string { + b, err := json.Marshal(ar) + if err != nil { + return "" + } else { + return string(b) + } +} + +func AccessResponseFromJson(data io.Reader) *AccessResponse { + decoder := json.NewDecoder(data) + var ar AccessResponse + err := decoder.Decode(&ar) + if err == nil { + return &ar + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/analytics_row.go b/vendor/github.com/mattermost/platform/model/analytics_row.go new file mode 100644 index 00000000..ed1d69dd --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/analytics_row.go @@ -0,0 +1,55 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
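Aside: every model type in this vendored package exposes the same ToJson/FromJson pair seen in access.go above and in analytics_row.go below. ToJson swallows marshal errors and returns an empty string, and each FromJson helper returns nil on any decode error, so callers must nil-check. A minimal round-trip sketch, with hypothetical values, assuming the vendored package builds as shipped:

package main

import (
	"fmt"
	"strings"

	"github.com/mattermost/platform/model"
)

func main() {
	ad := model.AccessData{
		AuthCode:    "hypothetical-authorization-code",
		Token:       strings.Repeat("x", 26), // IsValid requires exactly 26 characters
		RedirectUri: "https://example.com/callback",
	}

	js := ad.ToJson() // returns "" if marshalling ever fails

	// AccessDataFromJson returns nil rather than an error on bad input.
	if back := model.AccessDataFromJson(strings.NewReader(js)); back != nil {
		fmt.Println(back.Token == ad.Token) // true
	}
}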
+ +package model + +import ( + "encoding/json" + "io" +) + +type AnalyticsRow struct { + Name string `json:"name"` + Value float64 `json:"value"` +} + +type AnalyticsRows []*AnalyticsRow + +func (me *AnalyticsRow) ToJson() string { + b, err := json.Marshal(me) + if err != nil { + return "" + } else { + return string(b) + } +} + +func AnalyticsRowFromJson(data io.Reader) *AnalyticsRow { + decoder := json.NewDecoder(data) + var me AnalyticsRow + err := decoder.Decode(&me) + if err == nil { + return &me + } else { + return nil + } +} + +func (me AnalyticsRows) ToJson() string { + if b, err := json.Marshal(me); err != nil { + return "[]" + } else { + return string(b) + } +} + +func AnalyticsRowsFromJson(data io.Reader) AnalyticsRows { + decoder := json.NewDecoder(data) + var me AnalyticsRows + err := decoder.Decode(&me) + if err == nil { + return me + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/audit.go b/vendor/github.com/mattermost/platform/model/audit.go new file mode 100644 index 00000000..8fa1d558 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/audit.go @@ -0,0 +1,39 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type Audit struct { + Id string `json:"id"` + CreateAt int64 `json:"create_at"` + UserId string `json:"user_id"` + Action string `json:"action"` + ExtraInfo string `json:"extra_info"` + IpAddress string `json:"ip_address"` + SessionId string `json:"session_id"` +} + +func (o *Audit) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func AuditFromJson(data io.Reader) *Audit { + decoder := json.NewDecoder(data) + var o Audit + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/audits.go b/vendor/github.com/mattermost/platform/model/audits.go new file mode 100644 index 00000000..36c80629 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/audits.go @@ -0,0 +1,39 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type Audits []Audit + +func (o Audits) Etag() string { + if len(o) > 0 { + // the first in the list is always the most current + return Etag(o[0].CreateAt) + } else { + return "" + } +} + +func (o Audits) ToJson() string { + if b, err := json.Marshal(o); err != nil { + return "[]" + } else { + return string(b) + } +} + +func AuditsFromJson(data io.Reader) Audits { + decoder := json.NewDecoder(data) + var o Audits + err := decoder.Decode(&o) + if err == nil { + return o + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/authorize.go b/vendor/github.com/mattermost/platform/model/authorize.go new file mode 100644 index 00000000..e0d665ba --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/authorize.go @@ -0,0 +1,103 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
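Aside: audits.go above keys the collection's Etag off the first entry's CreateAt, relying on the newest-first ordering noted in its comment. A small sketch of decoding and fingerprinting a list, using a hypothetical payload and the same vendored import path:

package main

import (
	"fmt"
	"strings"

	"github.com/mattermost/platform/model"
)

func main() {
	// Newest entry first, matching the ordering Audits.Etag depends on.
	payload := `[{"id":"a2","create_at":1700000000000},{"id":"a1","create_at":1600000000000}]`

	audits := model.AuditsFromJson(strings.NewReader(payload))
	if audits == nil {
		panic("decode failed")
	}
	fmt.Println(audits.Etag()) // derived from the most recent CreateAt
}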
+ +package model + +import ( + "encoding/json" + "io" +) + +const ( + AUTHCODE_EXPIRE_TIME = 60 * 10 // 10 minutes + AUTHCODE_RESPONSE_TYPE = "code" +) + +type AuthData struct { + ClientId string `json:"client_id"` + UserId string `json:"user_id"` + Code string `json:"code"` + ExpiresIn int32 `json:"expires_in"` + CreateAt int64 `json:"create_at"` + RedirectUri string `json:"redirect_uri"` + State string `json:"state"` + Scope string `json:"scope"` +} + +// IsValid validates the AuthData and returns an error if it isn't configured +// correctly. +func (ad *AuthData) IsValid() *AppError { + + if len(ad.ClientId) != 26 { + return NewLocAppError("AuthData.IsValid", "model.authorize.is_valid.client_id.app_error", nil, "") + } + + if len(ad.UserId) != 26 { + return NewLocAppError("AuthData.IsValid", "model.authorize.is_valid.user_id.app_error", nil, "") + } + + if len(ad.Code) == 0 || len(ad.Code) > 128 { + return NewLocAppError("AuthData.IsValid", "model.authorize.is_valid.auth_code.app_error", nil, "client_id="+ad.ClientId) + } + + if ad.ExpiresIn == 0 { + return NewLocAppError("AuthData.IsValid", "model.authorize.is_valid.expires.app_error", nil, "") + } + + if ad.CreateAt <= 0 { + return NewLocAppError("AuthData.IsValid", "model.authorize.is_valid.create_at.app_error", nil, "client_id="+ad.ClientId) + } + + if len(ad.RedirectUri) > 256 { + return NewLocAppError("AuthData.IsValid", "model.authorize.is_valid.redirect_uri.app_error", nil, "client_id="+ad.ClientId) + } + + if len(ad.State) > 128 { + return NewLocAppError("AuthData.IsValid", "model.authorize.is_valid.state.app_error", nil, "client_id="+ad.ClientId) + } + + if len(ad.Scope) > 128 { + return NewLocAppError("AuthData.IsValid", "model.authorize.is_valid.scope.app_error", nil, "client_id="+ad.ClientId) + } + + return nil +} + +func (ad *AuthData) PreSave() { + if ad.ExpiresIn == 0 { + ad.ExpiresIn = AUTHCODE_EXPIRE_TIME + } + + if ad.CreateAt == 0 { + ad.CreateAt = GetMillis() + } +} + +func (ad *AuthData) ToJson() string { + b, err := json.Marshal(ad) + if err != nil { + return "" + } else { + return string(b) + } +} + +func AuthDataFromJson(data io.Reader) *AuthData { + decoder := json.NewDecoder(data) + var ad AuthData + err := decoder.Decode(&ad) + if err == nil { + return &ad + } else { + return nil + } +} + +func (ad *AuthData) IsExpired() bool { + + if GetMillis() > ad.CreateAt+int64(ad.ExpiresIn*1000) { + return true + } + + return false +} diff --git a/vendor/github.com/mattermost/platform/model/channel.go b/vendor/github.com/mattermost/platform/model/channel.go new file mode 100644 index 00000000..e7002e3c --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/channel.go @@ -0,0 +1,136 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
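Aside: authorize.go above stamps defaults in PreSave and measures expiry in milliseconds (CreateAt plus ExpiresIn seconds times 1000). A lifecycle sketch; NewId and GetMillis come from this package's utils.go, and the 26-character output of NewId is an assumption inferred from the length checks in IsValid:

package main

import (
	"fmt"

	"github.com/mattermost/platform/model"
)

func main() {
	ad := model.AuthData{
		ClientId:    model.NewId(), // assumed to yield the 26-char ids IsValid expects
		UserId:      model.NewId(),
		Code:        "one-time-authorization-code",
		RedirectUri: "https://example.com/callback",
	}

	ad.PreSave() // fills ExpiresIn (10 minutes) and CreateAt when they are unset

	if ad.IsValid() != nil {
		fmt.Println("invalid auth data")
		return
	}
	fmt.Println(ad.IsExpired()) // false until CreateAt + ExpiresIn*1000 ms elapse
}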
+ +package model + +import ( + "encoding/json" + "io" + "unicode/utf8" +) + +const ( + CHANNEL_OPEN = "O" + CHANNEL_PRIVATE = "P" + CHANNEL_DIRECT = "D" + DEFAULT_CHANNEL = "town-square" +) + +type Channel struct { + Id string `json:"id"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + TeamId string `json:"team_id"` + Type string `json:"type"` + DisplayName string `json:"display_name"` + Name string `json:"name"` + Header string `json:"header"` + Purpose string `json:"purpose"` + LastPostAt int64 `json:"last_post_at"` + TotalMsgCount int64 `json:"total_msg_count"` + ExtraUpdateAt int64 `json:"extra_update_at"` + CreatorId string `json:"creator_id"` +} + +func (o *Channel) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func ChannelFromJson(data io.Reader) *Channel { + decoder := json.NewDecoder(data) + var o Channel + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} + +func (o *Channel) Etag() string { + return Etag(o.Id, o.UpdateAt) +} + +func (o *Channel) ExtraEtag(memberLimit int) string { + return Etag(o.Id, o.ExtraUpdateAt, memberLimit) +} + +func (o *Channel) IsValid() *AppError { + + if len(o.Id) != 26 { + return NewLocAppError("Channel.IsValid", "model.channel.is_valid.id.app_error", nil, "") + } + + if o.CreateAt == 0 { + return NewLocAppError("Channel.IsValid", "model.channel.is_valid.create_at.app_error", nil, "id="+o.Id) + } + + if o.UpdateAt == 0 { + return NewLocAppError("Channel.IsValid", "model.channel.is_valid.update_at.app_error", nil, "id="+o.Id) + } + + if utf8.RuneCountInString(o.DisplayName) > 64 { + return NewLocAppError("Channel.IsValid", "model.channel.is_valid.display_name.app_error", nil, "id="+o.Id) + } + + if len(o.Name) > 64 { + return NewLocAppError("Channel.IsValid", "model.channel.is_valid.name.app_error", nil, "id="+o.Id) + } + + if !IsValidChannelIdentifier(o.Name) { + return NewLocAppError("Channel.IsValid", "model.channel.is_valid.2_or_more.app_error", nil, "id="+o.Id) + } + + if !(o.Type == CHANNEL_OPEN || o.Type == CHANNEL_PRIVATE || o.Type == CHANNEL_DIRECT) { + return NewLocAppError("Channel.IsValid", "model.channel.is_valid.type.app_error", nil, "id="+o.Id) + } + + if utf8.RuneCountInString(o.Header) > 1024 { + return NewLocAppError("Channel.IsValid", "model.channel.is_valid.header.app_error", nil, "id="+o.Id) + } + + if utf8.RuneCountInString(o.Purpose) > 128 { + return NewLocAppError("Channel.IsValid", "model.channel.is_valid.purpose.app_error", nil, "id="+o.Id) + } + + if len(o.CreatorId) > 26 { + return NewLocAppError("Channel.IsValid", "model.channel.is_valid.creator_id.app_error", nil, "") + } + + return nil +} + +func (o *Channel) PreSave() { + if o.Id == "" { + o.Id = NewId() + } + + o.CreateAt = GetMillis() + o.UpdateAt = o.CreateAt + o.ExtraUpdateAt = o.CreateAt +} + +func (o *Channel) PreUpdate() { + o.UpdateAt = GetMillis() +} + +func (o *Channel) ExtraUpdated() { + o.ExtraUpdateAt = GetMillis() +} + +func (o *Channel) PreExport() { +} + +func GetDMNameFromIds(userId1, userId2 string) string { + if userId1 > userId2 { + return userId2 + "__" + userId1 + } else { + return userId1 + "__" + userId2 + } +} diff --git a/vendor/github.com/mattermost/platform/model/channel_count.go b/vendor/github.com/mattermost/platform/model/channel_count.go new file mode 100644 index 00000000..6cc1b2f2 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/channel_count.go @@ -0,0 
+1,63 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "crypto/md5" + "encoding/json" + "fmt" + "io" + "sort" + "strconv" +) + +type ChannelCounts struct { + Counts map[string]int64 `json:"counts"` + UpdateTimes map[string]int64 `json:"update_times"` +} + +func (o *ChannelCounts) Etag() string { + + ids := []string{} + for id := range o.Counts { + ids = append(ids, id) + } + sort.Strings(ids) + + str := "" + for _, id := range ids { + str += id + strconv.FormatInt(o.Counts[id], 10) + } + + md5Counts := fmt.Sprintf("%x", md5.Sum([]byte(str))) + + var update int64 = 0 + for _, u := range o.UpdateTimes { + if u > update { + update = u + } + } + + return Etag(md5Counts, update) +} + +func (o *ChannelCounts) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func ChannelCountsFromJson(data io.Reader) *ChannelCounts { + decoder := json.NewDecoder(data) + var o ChannelCounts + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/channel_data.go b/vendor/github.com/mattermost/platform/model/channel_data.go new file mode 100644 index 00000000..731d50e7 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/channel_data.go @@ -0,0 +1,43 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type ChannelData struct { + Channel *Channel `json:"channel"` + Member *ChannelMember `json:"member"` +} + +func (o *ChannelData) Etag() string { + var mt int64 = 0 + if o.Member != nil { + mt = o.Member.LastUpdateAt + } + + return Etag(o.Channel.Id, o.Channel.UpdateAt, o.Channel.LastPostAt, mt) +} + +func (o *ChannelData) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func ChannelDataFromJson(data io.Reader) *ChannelData { + decoder := json.NewDecoder(data) + var o ChannelData + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/channel_extra.go b/vendor/github.com/mattermost/platform/model/channel_extra.go new file mode 100644 index 00000000..55da588a --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/channel_extra.go @@ -0,0 +1,49 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
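Aside: ChannelCounts.Etag above fingerprints the counts map by sorting the channel ids, concatenating each id with its count, hashing with md5, and folding in the newest update time; the sort makes the result independent of Go's randomized map iteration order. The hashing step in isolation, as a standalone sketch rather than the vendored code:

package main

import (
	"crypto/md5"
	"fmt"
	"sort"
	"strconv"
)

func main() {
	counts := map[string]int64{"chan-b": 7, "chan-a": 3}

	// Sort ids so the fingerprint is deterministic, as ChannelCounts.Etag does.
	ids := make([]string, 0, len(counts))
	for id := range counts {
		ids = append(ids, id)
	}
	sort.Strings(ids)

	str := ""
	for _, id := range ids {
		str += id + strconv.FormatInt(counts[id], 10)
	}
	fmt.Printf("%x\n", md5.Sum([]byte(str)))
}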
+ +package model + +import ( + "encoding/json" + "io" +) + +type ExtraMember struct { + Id string `json:"id"` + Nickname string `json:"nickname"` + Email string `json:"email"` + Roles string `json:"roles"` + Username string `json:"username"` +} + +func (o *ExtraMember) Sanitize(options map[string]bool) { + if len(options) == 0 || !options["email"] { + o.Email = "" + } +} + +type ChannelExtra struct { + Id string `json:"id"` + Members []ExtraMember `json:"members"` + MemberCount int64 `json:"member_count"` +} + +func (o *ChannelExtra) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func ChannelExtraFromJson(data io.Reader) *ChannelExtra { + decoder := json.NewDecoder(data) + var o ChannelExtra + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/channel_list.go b/vendor/github.com/mattermost/platform/model/channel_list.go new file mode 100644 index 00000000..49ba384a --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/channel_list.go @@ -0,0 +1,77 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type ChannelList struct { + Channels []*Channel `json:"channels"` + Members map[string]*ChannelMember `json:"members"` +} + +func (o *ChannelList) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func (o *ChannelList) Etag() string { + + id := "0" + var t int64 = 0 + var delta int64 = 0 + + for _, v := range o.Channels { + if v.LastPostAt > t { + t = v.LastPostAt + id = v.Id + } + + if v.UpdateAt > t { + t = v.UpdateAt + id = v.Id + } + + member := o.Members[v.Id] + + if member != nil { + max := v.LastPostAt + if v.UpdateAt > max { + max = v.UpdateAt + } + + delta += max - member.LastViewedAt + + if member.LastViewedAt > t { + t = member.LastViewedAt + id = v.Id + } + + if member.LastUpdateAt > t { + t = member.LastUpdateAt + id = v.Id + } + + } + } + + return Etag(id, t, delta, len(o.Channels)) +} + +func ChannelListFromJson(data io.Reader) *ChannelList { + decoder := json.NewDecoder(data) + var o ChannelList + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/channel_member.go b/vendor/github.com/mattermost/platform/model/channel_member.go new file mode 100644 index 00000000..66e20da6 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/channel_member.go @@ -0,0 +1,108 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
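Aside: ExtraMember.Sanitize in channel_extra.go above blanks the email address unless the options map explicitly allows it. A minimal sketch with hypothetical values:

package main

import (
	"fmt"

	"github.com/mattermost/platform/model"
)

func main() {
	m := model.ExtraMember{Id: "u1", Email: "user@example.com", Username: "user"}

	m.Sanitize(map[string]bool{}) // no "email" option set, so the address is cleared
	fmt.Println(m.Email == "")    // true

	m = model.ExtraMember{Email: "user@example.com"}
	m.Sanitize(map[string]bool{"email": true}) // explicitly allowed, so it survives
	fmt.Println(m.Email)
}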
+ +package model + +import ( + "encoding/json" + "io" + "strings" +) + +const ( + CHANNEL_ROLE_ADMIN = "admin" + CHANNEL_NOTIFY_DEFAULT = "default" + CHANNEL_NOTIFY_ALL = "all" + CHANNEL_NOTIFY_MENTION = "mention" + CHANNEL_NOTIFY_NONE = "none" + CHANNEL_MARK_UNREAD_ALL = "all" + CHANNEL_MARK_UNREAD_MENTION = "mention" +) + +type ChannelMember struct { + ChannelId string `json:"channel_id"` + UserId string `json:"user_id"` + Roles string `json:"roles"` + LastViewedAt int64 `json:"last_viewed_at"` + MsgCount int64 `json:"msg_count"` + MentionCount int64 `json:"mention_count"` + NotifyProps StringMap `json:"notify_props"` + LastUpdateAt int64 `json:"last_update_at"` +} + +func (o *ChannelMember) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func ChannelMemberFromJson(data io.Reader) *ChannelMember { + decoder := json.NewDecoder(data) + var o ChannelMember + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} + +func (o *ChannelMember) IsValid() *AppError { + + if len(o.ChannelId) != 26 { + return NewLocAppError("ChannelMember.IsValid", "model.channel_member.is_valid.channel_id.app_error", nil, "") + } + + if len(o.UserId) != 26 { + return NewLocAppError("ChannelMember.IsValid", "model.channel_member.is_valid.user_id.app_error", nil, "") + } + + for _, role := range strings.Split(o.Roles, " ") { + if !(role == "" || role == CHANNEL_ROLE_ADMIN) { + return NewLocAppError("ChannelMember.IsValid", "model.channel_member.is_valid.role.app_error", nil, "role="+role) + } + } + + notifyLevel := o.NotifyProps["desktop"] + if len(notifyLevel) > 20 || !IsChannelNotifyLevelValid(notifyLevel) { + return NewLocAppError("ChannelMember.IsValid", "model.channel_member.is_valid.notify_level.app_error", + nil, "notify_level="+notifyLevel) + } + + markUnreadLevel := o.NotifyProps["mark_unread"] + if len(markUnreadLevel) > 20 || !IsChannelMarkUnreadLevelValid(markUnreadLevel) { + return NewLocAppError("ChannelMember.IsValid", "model.channel_member.is_valid.unread_level.app_error", + nil, "mark_unread_level="+markUnreadLevel) + } + + return nil +} + +func (o *ChannelMember) PreSave() { + o.LastUpdateAt = GetMillis() +} + +func (o *ChannelMember) PreUpdate() { + o.LastUpdateAt = GetMillis() +} + +func IsChannelNotifyLevelValid(notifyLevel string) bool { + return notifyLevel == CHANNEL_NOTIFY_DEFAULT || + notifyLevel == CHANNEL_NOTIFY_ALL || + notifyLevel == CHANNEL_NOTIFY_MENTION || + notifyLevel == CHANNEL_NOTIFY_NONE +} + +func IsChannelMarkUnreadLevelValid(markUnreadLevel string) bool { + return markUnreadLevel == CHANNEL_MARK_UNREAD_ALL || markUnreadLevel == CHANNEL_MARK_UNREAD_MENTION +} + +func GetDefaultChannelNotifyProps() StringMap { + return StringMap{ + "desktop": CHANNEL_NOTIFY_DEFAULT, + "mark_unread": CHANNEL_MARK_UNREAD_ALL, + } +} diff --git a/vendor/github.com/mattermost/platform/model/client.go b/vendor/github.com/mattermost/platform/model/client.go new file mode 100644 index 00000000..fba4246e --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/client.go @@ -0,0 +1,1144 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
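+ +// A minimal usage sketch (not from the upstream source), assuming a server +// reachable at the given URL; the team name, credentials and channel id are +// placeholders: +// +// client := NewClient("http://localhost:8065") +// if result, err := client.LoginByEmail("team-name", "user@example.com", "password"); err == nil { +// user := result.Data.(*User) +// post := &Post{ChannelId: "channel-id", Message: "hello, " + user.Username} +// client.Must(client.CreatePost(post)) +// }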
+ +package model + +import ( + "bytes" + "fmt" + l4g "github.com/alecthomas/log4go" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +const ( + HEADER_REQUEST_ID = "X-Request-ID" + HEADER_VERSION_ID = "X-Version-ID" + HEADER_ETAG_SERVER = "ETag" + HEADER_ETAG_CLIENT = "If-None-Match" + HEADER_FORWARDED = "X-Forwarded-For" + HEADER_REAL_IP = "X-Real-IP" + HEADER_FORWARDED_PROTO = "X-Forwarded-Proto" + HEADER_TOKEN = "token" + HEADER_BEARER = "BEARER" + HEADER_AUTH = "Authorization" + HEADER_REQUESTED_WITH = "X-Requested-With" + HEADER_REQUESTED_WITH_XML = "XMLHttpRequest" + API_URL_SUFFIX = "/api/v1" +) + +type Result struct { + RequestId string + Etag string + Data interface{} +} + +type Client struct { + Url string // The location of the server like "http://localhost:8065" + ApiUrl string // The api location of the server like "http://localhost:8065/api/v1" + HttpClient *http.Client // The http client + AuthToken string + AuthType string +} + +// NewClient constructs a new client with convenience methods for talking to +// the server. +func NewClient(url string) *Client { + return &Client{url, url + API_URL_SUFFIX, &http.Client{}, "", ""} +} + +func (c *Client) DoPost(url, data, contentType string) (*http.Response, *AppError) { + rq, _ := http.NewRequest("POST", c.Url+url, strings.NewReader(data)) + rq.Header.Set("Content-Type", contentType) + + if rp, err := c.HttpClient.Do(rq); err != nil { + return nil, NewLocAppError(url, "model.client.connecting.app_error", nil, err.Error()) + } else if rp.StatusCode >= 300 { + return nil, AppErrorFromJson(rp.Body) + } else { + return rp, nil + } +} + +func (c *Client) DoApiPost(url string, data string) (*http.Response, *AppError) { + rq, _ := http.NewRequest("POST", c.ApiUrl+url, strings.NewReader(data)) + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil { + return nil, NewLocAppError(url, "model.client.connecting.app_error", nil, err.Error()) + } else if rp.StatusCode >= 300 { + return nil, AppErrorFromJson(rp.Body) + } else { + return rp, nil + } +} + +func (c *Client) DoApiGet(url string, data string, etag string) (*http.Response, *AppError) { + rq, _ := http.NewRequest("GET", c.ApiUrl+url, strings.NewReader(data)) + + if len(etag) > 0 { + rq.Header.Set(HEADER_ETAG_CLIENT, etag) + } + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, c.AuthType+" "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil { + return nil, NewLocAppError(url, "model.client.connecting.app_error", nil, err.Error()) + } else if rp.StatusCode == 304 { + return rp, nil + } else if rp.StatusCode >= 300 { + return rp, AppErrorFromJson(rp.Body) + } else { + return rp, nil + } +} + +func getCookie(name string, resp *http.Response) *http.Cookie { + for _, cookie := range resp.Cookies() { + if cookie.Name == name { + return cookie + } + } + + return nil +} + +func (c *Client) Must(result *Result, err *AppError) *Result { + if err != nil { + l4g.Close() + time.Sleep(time.Second) + panic(err) + } + + return result +} + +func (c *Client) SignupTeam(email string, displayName string) (*Result, *AppError) { + m := make(map[string]string) + m["email"] = email + m["display_name"] = displayName + if r, err := c.DoApiPost("/teams/signup", MapToJson(m)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) 
CreateTeamFromSignup(teamSignup *TeamSignup) (*Result, *AppError) { + if r, err := c.DoApiPost("/teams/create_from_signup", teamSignup.ToJson()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), TeamSignupFromJson(r.Body)}, nil + } +} + +func (c *Client) CreateTeam(team *Team) (*Result, *AppError) { + if r, err := c.DoApiPost("/teams/create", team.ToJson()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), TeamFromJson(r.Body)}, nil + } +} + +func (c *Client) GetAllTeams() (*Result, *AppError) { + if r, err := c.DoApiGet("/teams/all", "", ""); err != nil { + return nil, err + } else { + + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), TeamMapFromJson(r.Body)}, nil + } +} + +func (c *Client) FindTeamByName(name string, allServers bool) (*Result, *AppError) { + m := make(map[string]string) + m["name"] = name + m["all"] = fmt.Sprintf("%v", allServers) + if r, err := c.DoApiPost("/teams/find_team_by_name", MapToJson(m)); err != nil { + return nil, err + } else { + val := false + if body, _ := ioutil.ReadAll(r.Body); string(body) == "true" { + val = true + } + + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), val}, nil + } +} + +func (c *Client) InviteMembers(invites *Invites) (*Result, *AppError) { + if r, err := c.DoApiPost("/teams/invite_members", invites.ToJson()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), InvitesFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateTeam(team *Team) (*Result, *AppError) { + if r, err := c.DoApiPost("/teams/update", team.ToJson()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) CreateUser(user *User, hash string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/create", user.ToJson()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +func (c *Client) CreateUserFromSignup(user *User, data string, hash string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/create?d="+url.QueryEscape(data)+"&h="+hash, user.ToJson()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +func (c *Client) GetUser(id string, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet("/users/"+id, "", etag); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +func (c *Client) GetMe(etag string) (*Result, *AppError) { + if r, err := c.DoApiGet("/users/me", "", etag); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +func (c *Client) GetProfiles(teamId string, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet("/users/profiles/"+teamId, "", etag); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserMapFromJson(r.Body)}, nil + } +} + +func (c *Client) LoginById(id string, password 
string) (*Result, *AppError) { + m := make(map[string]string) + m["id"] = id + m["password"] = password + return c.login(m) +} + +func (c *Client) LoginByEmail(name string, email string, password string) (*Result, *AppError) { + m := make(map[string]string) + m["name"] = name + m["email"] = email + m["password"] = password + return c.login(m) +} + +func (c *Client) LoginByUsername(name string, username string, password string) (*Result, *AppError) { + m := make(map[string]string) + m["name"] = name + m["username"] = username + m["password"] = password + return c.login(m) +} + +func (c *Client) LoginByEmailWithDevice(name string, email string, password string, deviceId string) (*Result, *AppError) { + m := make(map[string]string) + m["name"] = name + m["email"] = email + m["password"] = password + m["device_id"] = deviceId + return c.login(m) +} + +func (c *Client) login(m map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/login", MapToJson(m)); err != nil { + return nil, err + } else { + c.AuthToken = r.Header.Get(HEADER_TOKEN) + c.AuthType = HEADER_BEARER + sessionToken := getCookie(SESSION_COOKIE_TOKEN, r) + + // The server also sets a session cookie that must match the returned + // token; fail the login if the cookie is missing or inconsistent. + if sessionToken == nil || c.AuthToken != sessionToken.Value { + return nil, NewLocAppError("/users/login", "model.client.login.app_error", nil, "") + } + + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +func (c *Client) Logout() (*Result, *AppError) { + if r, err := c.DoApiPost("/users/logout", ""); err != nil { + return nil, err + } else { + c.AuthToken = "" + c.AuthType = HEADER_BEARER + + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) CheckMfa(method, teamName, loginId string) (*Result, *AppError) { + m := make(map[string]string) + m["method"] = method + m["team_name"] = teamName + m["login_id"] = loginId + + if r, err := c.DoApiPost("/users/mfa", MapToJson(m)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) GenerateMfaQrCode() (*Result, *AppError) { + if r, err := c.DoApiGet("/users/generate_mfa_qr", "", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), r.Body}, nil + } +} + +func (c *Client) UpdateMfa(activate bool, token string) (*Result, *AppError) { + m := make(map[string]interface{}) + m["activate"] = activate + m["token"] = token + + if r, err := c.DoApiPost("/users/update_mfa", StringInterfaceToJson(m)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) SetOAuthToken(token string) { + c.AuthToken = token + c.AuthType = HEADER_TOKEN +} + +func (c *Client) ClearOAuthToken() { + c.AuthToken = "" + c.AuthType = HEADER_BEARER +} + +func (c *Client) RevokeSession(sessionAltId string) (*Result, *AppError) { + m := make(map[string]string) + m["id"] = sessionAltId + + if r, err := c.DoApiPost("/users/revoke_session", MapToJson(m)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) GetSessions(id string) (*Result, *AppError) { + if r, err := c.DoApiGet("/users/"+id+"/sessions", "", ""); err != nil { + return nil, err + } else { + return 
&Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), SessionsFromJson(r.Body)}, nil + } +} + +func (c *Client) EmailToOAuth(m map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/claim/email_to_sso", MapToJson(m)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) OAuthToEmail(m map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/claim/oauth_to_email", MapToJson(m)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) LDAPToEmail(m map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/claim/ldap_to_email", MapToJson(m)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) EmailToLDAP(m map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/claim/email_to_ldap", MapToJson(m)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) Command(channelId string, command string, suggest bool) (*Result, *AppError) { + m := make(map[string]string) + m["command"] = command + m["channelId"] = channelId + m["suggest"] = strconv.FormatBool(suggest) + if r, err := c.DoApiPost("/commands/execute", MapToJson(m)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), CommandResponseFromJson(r.Body)}, nil + } +} + +func (c *Client) ListCommands() (*Result, *AppError) { + if r, err := c.DoApiGet("/commands/list", "", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), CommandListFromJson(r.Body)}, nil + } +} + +func (c *Client) ListTeamCommands() (*Result, *AppError) { + if r, err := c.DoApiGet("/commands/list_team_commands", "", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), CommandListFromJson(r.Body)}, nil + } +} + +func (c *Client) CreateCommand(cmd *Command) (*Result, *AppError) { + if r, err := c.DoApiPost("/commands/create", cmd.ToJson()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), CommandFromJson(r.Body)}, nil + } +} + +func (c *Client) RegenCommandToken(data map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/commands/regen_token", MapToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), CommandFromJson(r.Body)}, nil + } +} + +func (c *Client) DeleteCommand(data map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/commands/delete", MapToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) GetAudits(id string, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet("/users/"+id+"/audits", "", etag); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + 
r.Header.Get(HEADER_ETAG_SERVER), AuditsFromJson(r.Body)}, nil + } +} + +func (c *Client) GetLogs() (*Result, *AppError) { + if r, err := c.DoApiGet("/admin/logs", "", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ArrayFromJson(r.Body)}, nil + } +} + +func (c *Client) GetAllAudits() (*Result, *AppError) { + if r, err := c.DoApiGet("/admin/audits", "", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), AuditsFromJson(r.Body)}, nil + } +} + +func (c *Client) GetClientProperties() (*Result, *AppError) { + if r, err := c.DoApiGet("/admin/client_props", "", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) GetConfig() (*Result, *AppError) { + if r, err := c.DoApiGet("/admin/config", "", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ConfigFromJson(r.Body)}, nil + } +} + +func (c *Client) SaveConfig(config *Config) (*Result, *AppError) { + if r, err := c.DoApiPost("/admin/save_config", config.ToJson()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ConfigFromJson(r.Body)}, nil + } +} + +func (c *Client) TestEmail(config *Config) (*Result, *AppError) { + if r, err := c.DoApiPost("/admin/test_email", config.ToJson()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) GetComplianceReports() (*Result, *AppError) { + if r, err := c.DoApiGet("/admin/compliance_reports", "", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), CompliancesFromJson(r.Body)}, nil + } +} + +func (c *Client) SaveComplianceReport(job *Compliance) (*Result, *AppError) { + if r, err := c.DoApiPost("/admin/save_compliance_report", job.ToJson()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ComplianceFromJson(r.Body)}, nil + } +} + +func (c *Client) DownloadComplianceReport(id string) (*Result, *AppError) { + var rq *http.Request + rq, _ = http.NewRequest("GET", c.ApiUrl+"/admin/download_compliance_report/"+id, nil) + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, "BEARER "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil { + return nil, NewLocAppError("/admin/download_compliance_report", "model.client.connecting.app_error", nil, err.Error()) + } else if rp.StatusCode >= 300 { + return nil, AppErrorFromJson(rp.Body) + } else { + return &Result{rp.Header.Get(HEADER_REQUEST_ID), + rp.Header.Get(HEADER_ETAG_SERVER), rp.Body}, nil + } +} + +func (c *Client) GetTeamAnalytics(teamId, name string) (*Result, *AppError) { + if r, err := c.DoApiGet("/admin/analytics/"+teamId+"/"+name, "", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), AnalyticsRowsFromJson(r.Body)}, nil + } +} + +func (c *Client) GetSystemAnalytics(name string) (*Result, *AppError) { + if r, err := c.DoApiGet("/admin/analytics/"+name, "", ""); err != nil { + return nil, err + } else { + return 
&Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), AnalyticsRowsFromJson(r.Body)}, nil + } +} + +func (c *Client) CreateChannel(channel *Channel) (*Result, *AppError) { + if r, err := c.DoApiPost("/channels/create", channel.ToJson()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelFromJson(r.Body)}, nil + } +} + +func (c *Client) CreateDirectChannel(data map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/channels/create_direct", MapToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateChannel(channel *Channel) (*Result, *AppError) { + if r, err := c.DoApiPost("/channels/update", channel.ToJson()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateChannelHeader(data map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/channels/update_header", MapToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateChannelPurpose(data map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/channels/update_purpose", MapToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateNotifyProps(data map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/channels/update_notify_props", MapToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) GetChannels(etag string) (*Result, *AppError) { + if r, err := c.DoApiGet("/channels/", "", etag); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelListFromJson(r.Body)}, nil + } +} + +func (c *Client) GetChannel(id, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet("/channels/"+id+"/", "", etag); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelDataFromJson(r.Body)}, nil + } +} + +func (c *Client) GetMoreChannels(etag string) (*Result, *AppError) { + if r, err := c.DoApiGet("/channels/more", "", etag); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelListFromJson(r.Body)}, nil + } +} + +func (c *Client) GetChannelCounts(etag string) (*Result, *AppError) { + if r, err := c.DoApiGet("/channels/counts", "", etag); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelCountsFromJson(r.Body)}, nil + } +} + +func (c *Client) JoinChannel(id string) (*Result, *AppError) { + if r, err := c.DoApiPost("/channels/"+id+"/join", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), nil}, nil + } +} + +func (c *Client) LeaveChannel(id string) (*Result, *AppError) { + if 
r, err := c.DoApiPost("/channels/"+id+"/leave", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), nil}, nil + } +} + +func (c *Client) DeleteChannel(id string) (*Result, *AppError) { + if r, err := c.DoApiPost("/channels/"+id+"/delete", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), nil}, nil + } +} + +func (c *Client) AddChannelMember(id, user_id string) (*Result, *AppError) { + data := make(map[string]string) + data["user_id"] = user_id + if r, err := c.DoApiPost("/channels/"+id+"/add", MapToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), nil}, nil + } +} + +func (c *Client) RemoveChannelMember(id, user_id string) (*Result, *AppError) { + data := make(map[string]string) + data["user_id"] = user_id + if r, err := c.DoApiPost("/channels/"+id+"/remove", MapToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), nil}, nil + } +} + +func (c *Client) UpdateLastViewedAt(channelId string) (*Result, *AppError) { + if r, err := c.DoApiPost("/channels/"+channelId+"/update_last_viewed_at", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), nil}, nil + } +} + +func (c *Client) GetChannelExtraInfo(id string, memberLimit int, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet("/channels/"+id+"/extra_info/"+strconv.FormatInt(int64(memberLimit), 10), "", etag); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), ChannelExtraFromJson(r.Body)}, nil + } +} + +func (c *Client) CreatePost(post *Post) (*Result, *AppError) { + if r, err := c.DoApiPost("/channels/"+post.ChannelId+"/create", post.ToJson()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdatePost(post *Post) (*Result, *AppError) { + if r, err := c.DoApiPost("/channels/"+post.ChannelId+"/update", post.ToJson()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostFromJson(r.Body)}, nil + } +} + +func (c *Client) GetPosts(channelId string, offset int, limit int, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet(fmt.Sprintf("/channels/%v/posts/%v/%v", channelId, offset, limit), "", etag); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil + } +} + +func (c *Client) GetPostsSince(channelId string, time int64) (*Result, *AppError) { + if r, err := c.DoApiGet(fmt.Sprintf("/channels/%v/posts/%v", channelId, time), "", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil + } +} + +func (c *Client) GetPostsBefore(channelId string, postid string, offset int, limit int, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet(fmt.Sprintf("/channels/%v/post/%v/before/%v/%v", channelId, postid, offset, limit), "", etag); err != nil { + return nil, err + } else { + return 
&Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil + } +} + +func (c *Client) GetPostsAfter(channelId string, postid string, offset int, limit int, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet(fmt.Sprintf("/channels/%v/post/%v/after/%v/%v", channelId, postid, offset, limit), "", etag); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil + } +} + +func (c *Client) GetPost(channelId string, postId string, etag string) (*Result, *AppError) { + if r, err := c.DoApiGet(fmt.Sprintf("/channels/%v/post/%v", channelId, postId), "", etag); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil + } +} + +func (c *Client) DeletePost(channelId string, postId string) (*Result, *AppError) { + if r, err := c.DoApiPost(fmt.Sprintf("/channels/%v/post/%v/delete", channelId, postId), ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) SearchPosts(terms string) (*Result, *AppError) { + if r, err := c.DoApiGet("/posts/search?terms="+url.QueryEscape(terms), "", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), PostListFromJson(r.Body)}, nil + } +} + +func (c *Client) UploadFile(url string, data []byte, contentType string) (*Result, *AppError) { + rq, _ := http.NewRequest("POST", c.ApiUrl+url, bytes.NewReader(data)) + rq.Header.Set("Content-Type", contentType) + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, "BEARER "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil { + return nil, NewLocAppError(url, "model.client.connecting.app_error", nil, err.Error()) + } else if rp.StatusCode >= 300 { + return nil, AppErrorFromJson(rp.Body) + } else { + return &Result{rp.Header.Get(HEADER_REQUEST_ID), + rp.Header.Get(HEADER_ETAG_SERVER), FileUploadResponseFromJson(rp.Body)}, nil + } +} + +func (c *Client) GetFile(url string, isFullUrl bool) (*Result, *AppError) { + var rq *http.Request + if isFullUrl { + rq, _ = http.NewRequest("GET", url, nil) + } else { + rq, _ = http.NewRequest("GET", c.ApiUrl+"/files/get"+url, nil) + } + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, "BEARER "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil { + return nil, NewLocAppError(url, "model.client.connecting.app_error", nil, err.Error()) + } else if rp.StatusCode >= 300 { + return nil, AppErrorFromJson(rp.Body) + } else { + return &Result{rp.Header.Get(HEADER_REQUEST_ID), + rp.Header.Get(HEADER_ETAG_SERVER), rp.Body}, nil + } +} + +func (c *Client) GetFileInfo(url string) (*Result, *AppError) { + var rq *http.Request + rq, _ = http.NewRequest("GET", c.ApiUrl+"/files/get_info"+url, nil) + + if len(c.AuthToken) > 0 { + rq.Header.Set(HEADER_AUTH, "BEARER "+c.AuthToken) + } + + if rp, err := c.HttpClient.Do(rq); err != nil { + return nil, NewLocAppError(url, "model.client.connecting.app_error", nil, err.Error()) + } else if rp.StatusCode >= 300 { + return nil, AppErrorFromJson(rp.Body) + } else { + return &Result{rp.Header.Get(HEADER_REQUEST_ID), + rp.Header.Get(HEADER_ETAG_SERVER), FileInfoFromJson(rp.Body)}, nil + } +} + +func (c *Client) GetPublicLink(data map[string]string) (*Result, *AppError) { 
+ if r, err := c.DoApiPost("/files/get_public_link", MapToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateUser(user *User) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/update", user.ToJson()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateUserRoles(data map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/update_roles", MapToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +func (c *Client) AttachDeviceId(deviceId string) (*Result, *AppError) { + data := make(map[string]string) + data["device_id"] = deviceId + if r, err := c.DoApiPost("/users/attach_device", MapToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateActive(userId string, active bool) (*Result, *AppError) { + data := make(map[string]string) + data["user_id"] = userId + data["active"] = strconv.FormatBool(active) + if r, err := c.DoApiPost("/users/update_active", MapToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateUserNotify(data map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/update_notify", MapToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +func (c *Client) UpdateUserPassword(userId, currentPassword, newPassword string) (*Result, *AppError) { + data := make(map[string]string) + data["current_password"] = currentPassword + data["new_password"] = newPassword + data["user_id"] = userId + + if r, err := c.DoApiPost("/users/newpassword", MapToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), UserFromJson(r.Body)}, nil + } +} + +func (c *Client) SendPasswordReset(data map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/send_password_reset", MapToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) ResetPassword(data map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/reset_password", MapToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) GetStatuses(data []string) (*Result, *AppError) { + if r, err := c.DoApiPost("/users/status", ArrayToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) GetMyTeam(etag string) (*Result, *AppError) { + if r, err := c.DoApiGet("/teams/me", "", etag); err != nil { + return nil, err + } else { + return 
&Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), TeamFromJson(r.Body)}, nil + } +} + +func (c *Client) RegisterApp(app *OAuthApp) (*Result, *AppError) { + if r, err := c.DoApiPost("/oauth/register", app.ToJson()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), OAuthAppFromJson(r.Body)}, nil + } +} + +func (c *Client) AllowOAuth(rspType, clientId, redirect, scope, state string) (*Result, *AppError) { + if r, err := c.DoApiGet("/oauth/allow?response_type="+rspType+"&client_id="+clientId+"&redirect_uri="+url.QueryEscape(redirect)+"&scope="+scope+"&state="+url.QueryEscape(state), "", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) GetAccessToken(data url.Values) (*Result, *AppError) { + if r, err := c.DoApiPost("/oauth/access_token", data.Encode()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), AccessResponseFromJson(r.Body)}, nil + } +} + +func (c *Client) CreateIncomingWebhook(hook *IncomingWebhook) (*Result, *AppError) { + if r, err := c.DoApiPost("/hooks/incoming/create", hook.ToJson()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), IncomingWebhookFromJson(r.Body)}, nil + } +} + +func (c *Client) PostToWebhook(id, payload string) (*Result, *AppError) { + if r, err := c.DoPost("/hooks/"+id, payload, "application/x-www-form-urlencoded"); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), nil}, nil + } +} + +func (c *Client) DeleteIncomingWebhook(data map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/hooks/incoming/delete", MapToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) ListIncomingWebhooks() (*Result, *AppError) { + if r, err := c.DoApiGet("/hooks/incoming/list", "", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), IncomingWebhookListFromJson(r.Body)}, nil + } +} + +func (c *Client) GetAllPreferences() (*Result, *AppError) { + if r, err := c.DoApiGet("/preferences/", "", ""); err != nil { + return nil, err + } else { + preferences, _ := PreferencesFromJson(r.Body) + return &Result{r.Header.Get(HEADER_REQUEST_ID), r.Header.Get(HEADER_ETAG_SERVER), preferences}, nil + } +} + +func (c *Client) SetPreferences(preferences *Preferences) (*Result, *AppError) { + if r, err := c.DoApiPost("/preferences/save", preferences.ToJson()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), preferences}, nil + } +} + +func (c *Client) GetPreference(category string, name string) (*Result, *AppError) { + if r, err := c.DoApiGet("/preferences/"+category+"/"+name, "", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), r.Header.Get(HEADER_ETAG_SERVER), PreferenceFromJson(r.Body)}, nil + } +} + +func (c *Client) GetPreferenceCategory(category string) (*Result, *AppError) { + if r, err := c.DoApiGet("/preferences/"+category, "", ""); err != nil { + return nil, err 
+ } else { + preferences, _ := PreferencesFromJson(r.Body) + return &Result{r.Header.Get(HEADER_REQUEST_ID), r.Header.Get(HEADER_ETAG_SERVER), preferences}, nil + } +} + +func (c *Client) CreateOutgoingWebhook(hook *OutgoingWebhook) (*Result, *AppError) { + if r, err := c.DoApiPost("/hooks/outgoing/create", hook.ToJson()); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), OutgoingWebhookFromJson(r.Body)}, nil + } +} + +func (c *Client) DeleteOutgoingWebhook(data map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/hooks/outgoing/delete", MapToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) ListOutgoingWebhooks() (*Result, *AppError) { + if r, err := c.DoApiGet("/hooks/outgoing/list", "", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), OutgoingWebhookListFromJson(r.Body)}, nil + } +} + +func (c *Client) RegenOutgoingWebhookToken(data map[string]string) (*Result, *AppError) { + if r, err := c.DoApiPost("/hooks/outgoing/regen_token", MapToJson(data)); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), OutgoingWebhookFromJson(r.Body)}, nil + } +} + +func (c *Client) MockSession(sessionToken string) { + c.AuthToken = sessionToken + c.AuthType = HEADER_BEARER +} + +func (c *Client) GetClientLicenceConfig() (*Result, *AppError) { + if r, err := c.DoApiGet("/license/client_config", "", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} + +func (c *Client) GetMeLoggedIn() (*Result, *AppError) { + if r, err := c.DoApiGet("/users/me_logged_in", "", ""); err != nil { + return nil, err + } else { + return &Result{r.Header.Get(HEADER_REQUEST_ID), + r.Header.Get(HEADER_ETAG_SERVER), MapFromJson(r.Body)}, nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/command.go b/vendor/github.com/mattermost/platform/model/command.go new file mode 100644 index 00000000..b854ae76 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/command.go @@ -0,0 +1,153 @@ +// Copyright (c) 2016 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
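+ +// A minimal illustrative sketch (not from the upstream source); the trigger +// and URL are placeholders. PreSave fills the Id, Token and timestamps that +// IsValid then checks: +// +// cmd := &Command{ +// CreatorId: NewId(), +// TeamId: NewId(), +// Trigger: "echo", +// Method: COMMAND_METHOD_POST, +// URL: "http://example.com/command", +// } +// cmd.PreSave() +// if err := cmd.IsValid(); err != nil { /* reject the command */ }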
+ +package model + +import ( + "encoding/json" + "io" +) + +const ( + COMMAND_METHOD_POST = "P" + COMMAND_METHOD_GET = "G" +) + +type Command struct { + Id string `json:"id"` + Token string `json:"token"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + CreatorId string `json:"creator_id"` + TeamId string `json:"team_id"` + Trigger string `json:"trigger"` + Method string `json:"method"` + Username string `json:"username"` + IconURL string `json:"icon_url"` + AutoComplete bool `json:"auto_complete"` + AutoCompleteDesc string `json:"auto_complete_desc"` + AutoCompleteHint string `json:"auto_complete_hint"` + DisplayName string `json:"display_name"` + Description string `json:"description"` + URL string `json:"url"` +} + +func (o *Command) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func CommandFromJson(data io.Reader) *Command { + decoder := json.NewDecoder(data) + var o Command + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} + +func CommandListToJson(l []*Command) string { + b, err := json.Marshal(l) + if err != nil { + return "" + } else { + return string(b) + } +} + +func CommandListFromJson(data io.Reader) []*Command { + decoder := json.NewDecoder(data) + var o []*Command + err := decoder.Decode(&o) + if err == nil { + return o + } else { + return nil + } +} + +func (o *Command) IsValid() *AppError { + + if len(o.Id) != 26 { + return NewLocAppError("Command.IsValid", "model.command.is_valid.id.app_error", nil, "") + } + + if len(o.Token) != 26 { + return NewLocAppError("Command.IsValid", "model.command.is_valid.token.app_error", nil, "") + } + + if o.CreateAt == 0 { + return NewLocAppError("Command.IsValid", "model.command.is_valid.create_at.app_error", nil, "") + } + + if o.UpdateAt == 0 { + return NewLocAppError("Command.IsValid", "model.command.is_valid.update_at.app_error", nil, "") + } + + if len(o.CreatorId) != 26 { + return NewLocAppError("Command.IsValid", "model.command.is_valid.user_id.app_error", nil, "") + } + + if len(o.TeamId) != 26 { + return NewLocAppError("Command.IsValid", "model.command.is_valid.team_id.app_error", nil, "") + } + + if len(o.Trigger) > 128 { + return NewLocAppError("Command.IsValid", "model.command.is_valid.trigger.app_error", nil, "") + } + + if len(o.URL) == 0 || len(o.URL) > 1024 { + return NewLocAppError("Command.IsValid", "model.command.is_valid.url.app_error", nil, "") + } + + if !IsValidHttpUrl(o.URL) { + return NewLocAppError("Command.IsValid", "model.command.is_valid.url_http.app_error", nil, "") + } + + if !(o.Method == COMMAND_METHOD_GET || o.Method == COMMAND_METHOD_POST) { + return NewLocAppError("Command.IsValid", "model.command.is_valid.method.app_error", nil, "") + } + + if len(o.DisplayName) > 64 { + return NewLocAppError("Command.IsValid", "model.command.is_valid.display_name.app_error", nil, "") + } + + if len(o.Description) > 128 { + return NewLocAppError("Command.IsValid", "model.command.is_valid.description.app_error", nil, "") + } + + return nil +} + +func (o *Command) PreSave() { + if o.Id == "" { + o.Id = NewId() + } + + if o.Token == "" { + o.Token = NewId() + } + + o.CreateAt = GetMillis() + o.UpdateAt = o.CreateAt +} + +func (o *Command) PreUpdate() { + o.UpdateAt = GetMillis() +} + +func (o *Command) Sanitize() { + o.Token = "" + o.CreatorId = "" + o.Method = "" + o.URL = "" + o.Username = "" + o.IconURL = "" +} diff --git 
a/vendor/github.com/mattermost/platform/model/command_response.go b/vendor/github.com/mattermost/platform/model/command_response.go new file mode 100644 index 00000000..9314f38e --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/command_response.go @@ -0,0 +1,41 @@ +// Copyright (c) 2016 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +const ( + COMMAND_RESPONSE_TYPE_IN_CHANNEL = "in_channel" + COMMAND_RESPONSE_TYPE_EPHEMERAL = "ephemeral" +) + +type CommandResponse struct { + ResponseType string `json:"response_type"` + Text string `json:"text"` + GotoLocation string `json:"goto_location"` + Attachments interface{} `json:"attachments"` +} + +func (o *CommandResponse) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func CommandResponseFromJson(data io.Reader) *CommandResponse { + decoder := json.NewDecoder(data) + var o CommandResponse + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/compliance.go b/vendor/github.com/mattermost/platform/model/compliance.go new file mode 100644 index 00000000..4a96a597 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/compliance.go @@ -0,0 +1,132 @@ +// Copyright (c) 2016 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" + "strings" +) + +const ( + COMPLIANCE_STATUS_CREATED = "created" + COMPLIANCE_STATUS_RUNNING = "running" + COMPLIANCE_STATUS_FINISHED = "finished" + COMPLIANCE_STATUS_FAILED = "failed" + COMPLIANCE_STATUS_REMOVED = "removed" + + COMPLIANCE_TYPE_DAILY = "daily" + COMPLIANCE_TYPE_ADHOC = "adhoc" +) + +type Compliance struct { + Id string `json:"id"` + CreateAt int64 `json:"create_at"` + UserId string `json:"user_id"` + Status string `json:"status"` + Count int `json:"count"` + Desc string `json:"desc"` + Type string `json:"type"` + StartAt int64 `json:"start_at"` + EndAt int64 `json:"end_at"` + Keywords string `json:"keywords"` + Emails string `json:"emails"` +} + +type Compliances []Compliance + +func (o *Compliance) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func (me *Compliance) PreSave() { + if me.Id == "" { + me.Id = NewId() + } + + if me.Status == "" { + me.Status = COMPLIANCE_STATUS_CREATED + } + + me.Count = 0 + me.Emails = strings.ToLower(me.Emails) + me.Keywords = strings.ToLower(me.Keywords) + + me.CreateAt = GetMillis() +} + +func (me *Compliance) JobName() string { + jobName := me.Type + if me.Type == COMPLIANCE_TYPE_DAILY { + jobName += "-" + me.Desc + } + + jobName += "-" + me.Id + + return jobName +} + +func (me *Compliance) IsValid() *AppError { + + if len(me.Id) != 26 { + return NewLocAppError("Compliance.IsValid", "model.compliance.is_valid.id.app_error", nil, "") + } + + if me.CreateAt == 0 { + return NewLocAppError("Compliance.IsValid", "model.compliance.is_valid.create_at.app_error", nil, "") + } + + if len(me.Desc) > 512 || len(me.Desc) == 0 { + return NewLocAppError("Compliance.IsValid", "model.compliance.is_valid.desc.app_error", nil, "") + } + + if me.StartAt == 0 { + return NewLocAppError("Compliance.IsValid", "model.compliance.is_valid.start_at.app_error", nil, "") + } + + if me.EndAt == 0 { + return NewLocAppError("Compliance.IsValid", 
"model.compliance.is_valid.end_at.app_error", nil, "") + } + + if me.EndAt <= me.StartAt { + return NewLocAppError("Compliance.IsValid", "model.compliance.is_valid.start_end_at.app_error", nil, "") + } + + return nil +} + +func ComplianceFromJson(data io.Reader) *Compliance { + decoder := json.NewDecoder(data) + var o Compliance + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} + +func (o Compliances) ToJson() string { + if b, err := json.Marshal(o); err != nil { + return "[]" + } else { + return string(b) + } +} + +func CompliancesFromJson(data io.Reader) Compliances { + decoder := json.NewDecoder(data) + var o Compliances + err := decoder.Decode(&o) + if err == nil { + return o + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/compliance_post.go b/vendor/github.com/mattermost/platform/model/compliance_post.go new file mode 100644 index 00000000..ce26a366 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/compliance_post.go @@ -0,0 +1,104 @@ +// Copyright (c) 2016 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "time" +) + +type CompliancePost struct { + + // From Team + TeamName string + TeamDisplayName string + + // From Channel + ChannelName string + ChannelDisplayName string + + // From User + UserUsername string + UserEmail string + UserNickname string + + // From Post + PostId string + PostCreateAt int64 + PostUpdateAt int64 + PostDeleteAt int64 + PostRootId string + PostParentId string + PostOriginalId string + PostMessage string + PostType string + PostProps string + PostHashtags string + PostFilenames string +} + +func CompliancePostHeader() []string { + return []string{ + "TeamName", + "TeamDisplayName", + + "ChannelName", + "ChannelDisplayName", + + "UserUsername", + "UserEmail", + "UserNickname", + + "PostId", + "PostCreateAt", + "PostUpdateAt", + "PostDeleteAt", + "PostRootId", + "PostParentId", + "PostOriginalId", + "PostMessage", + "PostType", + "PostProps", + "PostHashtags", + "PostFilenames", + } +} + +func (me *CompliancePost) Row() []string { + + postDeleteAt := "" + if me.PostDeleteAt > 0 { + postDeleteAt = time.Unix(0, me.PostDeleteAt*int64(1000*1000)).Format(time.RFC3339) + } + + postUpdateAt := "" + if me.PostUpdateAt != me.PostCreateAt { + postUpdateAt = time.Unix(0, me.PostUpdateAt*int64(1000*1000)).Format(time.RFC3339) + } + + return []string{ + me.TeamName, + me.TeamDisplayName, + + me.ChannelName, + me.ChannelDisplayName, + + me.UserUsername, + me.UserEmail, + me.UserNickname, + + me.PostId, + time.Unix(0, me.PostCreateAt*int64(1000*1000)).Format(time.RFC3339), + postUpdateAt, + postDeleteAt, + + me.PostRootId, + me.PostParentId, + me.PostOriginalId, + me.PostMessage, + me.PostType, + me.PostProps, + me.PostHashtags, + me.PostFilenames, + } +} diff --git a/vendor/github.com/mattermost/platform/model/config.go b/vendor/github.com/mattermost/platform/model/config.go new file mode 100644 index 00000000..a8974359 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/config.go @@ -0,0 +1,554 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package model + +import ( + "encoding/json" + "io" +) + +const ( + CONN_SECURITY_NONE = "" + CONN_SECURITY_TLS = "TLS" + CONN_SECURITY_STARTTLS = "STARTTLS" + + IMAGE_DRIVER_LOCAL = "local" + IMAGE_DRIVER_S3 = "amazons3" + + DATABASE_DRIVER_MYSQL = "mysql" + DATABASE_DRIVER_POSTGRES = "postgres" + + SERVICE_GITLAB = "gitlab" + SERVICE_GOOGLE = "google" + + WEBSERVER_MODE_REGULAR = "regular" + WEBSERVER_MODE_GZIP = "gzip" + WEBSERVER_MODE_DISABLED = "disabled" + + GENERIC_NOTIFICATION = "generic" + FULL_NOTIFICATION = "full" +) + +type ServiceSettings struct { + ListenAddress string + MaximumLoginAttempts int + SegmentDeveloperKey string + GoogleDeveloperKey string + EnableOAuthServiceProvider bool + EnableIncomingWebhooks bool + EnableOutgoingWebhooks bool + EnableCommands *bool + EnableOnlyAdminIntegrations *bool + EnablePostUsernameOverride bool + EnablePostIconOverride bool + EnableTesting bool + EnableDeveloper *bool + EnableSecurityFixAlert *bool + EnableInsecureOutgoingConnections *bool + EnableMultifactorAuthentication *bool + AllowCorsFrom *string + SessionLengthWebInDays *int + SessionLengthMobileInDays *int + SessionLengthSSOInDays *int + SessionCacheInMinutes *int + WebsocketSecurePort *int + WebsocketPort *int + WebserverMode *string +} + +type SSOSettings struct { + Enable bool + Secret string + Id string + Scope string + AuthEndpoint string + TokenEndpoint string + UserApiEndpoint string +} + +type SqlSettings struct { + DriverName string + DataSource string + DataSourceReplicas []string + MaxIdleConns int + MaxOpenConns int + Trace bool + AtRestEncryptKey string +} + +type LogSettings struct { + EnableConsole bool + ConsoleLevel string + EnableFile bool + FileLevel string + FileFormat string + FileLocation string +} + +type FileSettings struct { + DriverName string + Directory string + EnablePublicLink bool + PublicLinkSalt string + ThumbnailWidth int + ThumbnailHeight int + PreviewWidth int + PreviewHeight int + ProfileWidth int + ProfileHeight int + InitialFont string + AmazonS3AccessKeyId string + AmazonS3SecretAccessKey string + AmazonS3Bucket string + AmazonS3Region string + AmazonS3Endpoint string + AmazonS3BucketEndpoint string + AmazonS3LocationConstraint *bool + AmazonS3LowercaseBucket *bool +} + +type EmailSettings struct { + EnableSignUpWithEmail bool + EnableSignInWithEmail *bool + EnableSignInWithUsername *bool + SendEmailNotifications bool + RequireEmailVerification bool + FeedbackName string + FeedbackEmail string + SMTPUsername string + SMTPPassword string + SMTPServer string + SMTPPort string + ConnectionSecurity string + InviteSalt string + PasswordResetSalt string + SendPushNotifications *bool + PushNotificationServer *string + PushNotificationContents *string +} + +type RateLimitSettings struct { + EnableRateLimiter bool + PerSec int + MemoryStoreSize int + VaryByRemoteAddr bool + VaryByHeader string +} + +type PrivacySettings struct { + ShowEmailAddress bool + ShowFullName bool +} + +type SupportSettings struct { + TermsOfServiceLink *string + PrivacyPolicyLink *string + AboutLink *string + HelpLink *string + ReportAProblemLink *string + SupportEmail *string +} + +type TeamSettings struct { + SiteName string + MaxUsersPerTeam int + EnableTeamCreation bool + EnableUserCreation bool + RestrictCreationToDomains string + RestrictTeamNames *bool + EnableTeamListing *bool +} + +type LdapSettings struct { + // Basic + Enable *bool + LdapServer *string + LdapPort *int + ConnectionSecurity *string + BaseDN *string + BindUsername *string + BindPassword *string 
+ + // Filtering + UserFilter *string + + // User Mapping + FirstNameAttribute *string + LastNameAttribute *string + EmailAttribute *string + UsernameAttribute *string + IdAttribute *string + + // Advanced + SkipCertificateVerification *bool + QueryTimeout *int +} + +type ComplianceSettings struct { + Enable *bool + Directory *string + EnableDaily *bool +} + +type Config struct { + ServiceSettings ServiceSettings + TeamSettings TeamSettings + SqlSettings SqlSettings + LogSettings LogSettings + FileSettings FileSettings + EmailSettings EmailSettings + RateLimitSettings RateLimitSettings + PrivacySettings PrivacySettings + SupportSettings SupportSettings + GitLabSettings SSOSettings + GoogleSettings SSOSettings + LdapSettings LdapSettings + ComplianceSettings ComplianceSettings +} + +func (o *Config) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func (o *Config) GetSSOService(service string) *SSOSettings { + switch service { + case SERVICE_GITLAB: + return &o.GitLabSettings + case SERVICE_GOOGLE: + return &o.GoogleSettings + } + + return nil +} + +func ConfigFromJson(data io.Reader) *Config { + decoder := json.NewDecoder(data) + var o Config + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} + +func (o *Config) SetDefaults() { + + if len(o.SqlSettings.AtRestEncryptKey) == 0 { + o.SqlSettings.AtRestEncryptKey = NewRandomString(32) + } + + if len(o.FileSettings.PublicLinkSalt) == 0 { + o.FileSettings.PublicLinkSalt = NewRandomString(32) + } + + if o.FileSettings.AmazonS3LocationConstraint == nil { + o.FileSettings.AmazonS3LocationConstraint = new(bool) + *o.FileSettings.AmazonS3LocationConstraint = false + } + + if o.FileSettings.AmazonS3LowercaseBucket == nil { + o.FileSettings.AmazonS3LowercaseBucket = new(bool) + *o.FileSettings.AmazonS3LowercaseBucket = false + } + + if len(o.EmailSettings.InviteSalt) == 0 { + o.EmailSettings.InviteSalt = NewRandomString(32) + } + + if len(o.EmailSettings.PasswordResetSalt) == 0 { + o.EmailSettings.PasswordResetSalt = NewRandomString(32) + } + + if o.ServiceSettings.EnableDeveloper == nil { + o.ServiceSettings.EnableDeveloper = new(bool) + *o.ServiceSettings.EnableDeveloper = false + } + + if o.ServiceSettings.EnableSecurityFixAlert == nil { + o.ServiceSettings.EnableSecurityFixAlert = new(bool) + *o.ServiceSettings.EnableSecurityFixAlert = true + } + + if o.ServiceSettings.EnableInsecureOutgoingConnections == nil { + o.ServiceSettings.EnableInsecureOutgoingConnections = new(bool) + *o.ServiceSettings.EnableInsecureOutgoingConnections = false + } + + if o.ServiceSettings.EnableMultifactorAuthentication == nil { + o.ServiceSettings.EnableMultifactorAuthentication = new(bool) + *o.ServiceSettings.EnableMultifactorAuthentication = false + } + + if o.TeamSettings.RestrictTeamNames == nil { + o.TeamSettings.RestrictTeamNames = new(bool) + *o.TeamSettings.RestrictTeamNames = true + } + + if o.TeamSettings.EnableTeamListing == nil { + o.TeamSettings.EnableTeamListing = new(bool) + *o.TeamSettings.EnableTeamListing = false + } + + if o.EmailSettings.EnableSignInWithEmail == nil { + o.EmailSettings.EnableSignInWithEmail = new(bool) + + if o.EmailSettings.EnableSignUpWithEmail == true { + *o.EmailSettings.EnableSignInWithEmail = true + } else { + *o.EmailSettings.EnableSignInWithEmail = false + } + } + + if o.EmailSettings.EnableSignInWithUsername == nil { + o.EmailSettings.EnableSignInWithUsername = new(bool) + *o.EmailSettings.EnableSignInWithUsername = 
false + } + + if o.EmailSettings.SendPushNotifications == nil { + o.EmailSettings.SendPushNotifications = new(bool) + *o.EmailSettings.SendPushNotifications = false + } + + if o.EmailSettings.PushNotificationServer == nil { + o.EmailSettings.PushNotificationServer = new(string) + *o.EmailSettings.PushNotificationServer = "" + } + + if o.EmailSettings.PushNotificationContents == nil { + o.EmailSettings.PushNotificationContents = new(string) + *o.EmailSettings.PushNotificationContents = GENERIC_NOTIFICATION + } + + if o.SupportSettings.TermsOfServiceLink == nil { + o.SupportSettings.TermsOfServiceLink = new(string) + *o.SupportSettings.TermsOfServiceLink = "/static/help/terms.html" + } + + if o.SupportSettings.PrivacyPolicyLink == nil { + o.SupportSettings.PrivacyPolicyLink = new(string) + *o.SupportSettings.PrivacyPolicyLink = "/static/help/privacy.html" + } + + if o.SupportSettings.AboutLink == nil { + o.SupportSettings.AboutLink = new(string) + *o.SupportSettings.AboutLink = "/static/help/about.html" + } + + if o.SupportSettings.HelpLink == nil { + o.SupportSettings.HelpLink = new(string) + *o.SupportSettings.HelpLink = "/static/help/help.html" + } + + if o.SupportSettings.ReportAProblemLink == nil { + o.SupportSettings.ReportAProblemLink = new(string) + *o.SupportSettings.ReportAProblemLink = "/static/help/report_problem.html" + } + + if o.SupportSettings.SupportEmail == nil { + o.SupportSettings.SupportEmail = new(string) + *o.SupportSettings.SupportEmail = "feedback@mattermost.com" + } + + if o.LdapSettings.LdapPort == nil { + o.LdapSettings.LdapPort = new(int) + *o.LdapSettings.LdapPort = 389 + } + + if o.LdapSettings.QueryTimeout == nil { + o.LdapSettings.QueryTimeout = new(int) + *o.LdapSettings.QueryTimeout = 60 + } + + if o.LdapSettings.Enable == nil { + o.LdapSettings.Enable = new(bool) + *o.LdapSettings.Enable = false + } + + if o.LdapSettings.UserFilter == nil { + o.LdapSettings.UserFilter = new(string) + *o.LdapSettings.UserFilter = "" + } + + if o.ServiceSettings.SessionLengthWebInDays == nil { + o.ServiceSettings.SessionLengthWebInDays = new(int) + *o.ServiceSettings.SessionLengthWebInDays = 30 + } + + if o.ServiceSettings.SessionLengthMobileInDays == nil { + o.ServiceSettings.SessionLengthMobileInDays = new(int) + *o.ServiceSettings.SessionLengthMobileInDays = 30 + } + + if o.ServiceSettings.SessionLengthSSOInDays == nil { + o.ServiceSettings.SessionLengthSSOInDays = new(int) + *o.ServiceSettings.SessionLengthSSOInDays = 30 + } + + if o.ServiceSettings.SessionCacheInMinutes == nil { + o.ServiceSettings.SessionCacheInMinutes = new(int) + *o.ServiceSettings.SessionCacheInMinutes = 10 + } + + if o.ServiceSettings.EnableCommands == nil { + o.ServiceSettings.EnableCommands = new(bool) + *o.ServiceSettings.EnableCommands = false + } + + if o.ServiceSettings.EnableOnlyAdminIntegrations == nil { + o.ServiceSettings.EnableOnlyAdminIntegrations = new(bool) + *o.ServiceSettings.EnableOnlyAdminIntegrations = true + } + + if o.ServiceSettings.WebsocketPort == nil { + o.ServiceSettings.WebsocketPort = new(int) + *o.ServiceSettings.WebsocketPort = 80 + } + + if o.ServiceSettings.WebsocketSecurePort == nil { + o.ServiceSettings.WebsocketSecurePort = new(int) + *o.ServiceSettings.WebsocketSecurePort = 443 + } + + if o.ServiceSettings.AllowCorsFrom == nil { + o.ServiceSettings.AllowCorsFrom = new(string) + *o.ServiceSettings.AllowCorsFrom = "" + } + + if o.ServiceSettings.WebserverMode == nil { + o.ServiceSettings.WebserverMode = new(string) + *o.ServiceSettings.WebserverMode = "regular" + } 
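The long run of nil checks above (and continuing below) is one idiom repeated per field: optional settings are declared as pointers so that a key missing from config.json (nil) can be told apart from an explicitly configured zero value, and SetDefaults only fills the former. A minimal self-contained sketch of the pattern; defaultBool is an assumed helper for illustration, not part of the vendored file:

package main

import "fmt"

// defaultBool fills a pointer-typed setting only when it is still nil,
// mirroring the per-field blocks in Config.SetDefaults. Assumed helper,
// not part of the vendored code.
func defaultBool(field **bool, value bool) {
	if *field == nil {
		*field = new(bool)
		**field = value
	}
}

func main() {
	var enableDeveloper *bool // nil: key absent from config.json
	defaultBool(&enableDeveloper, false)
	fmt.Println(*enableDeveloper) // false; an explicitly set value would be kept
}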
+ + if o.ComplianceSettings.Enable == nil { + o.ComplianceSettings.Enable = new(bool) + *o.ComplianceSettings.Enable = false + } + + if o.ComplianceSettings.Directory == nil { + o.ComplianceSettings.Directory = new(string) + *o.ComplianceSettings.Directory = "./data/" + } + + if o.ComplianceSettings.EnableDaily == nil { + o.ComplianceSettings.EnableDaily = new(bool) + *o.ComplianceSettings.EnableDaily = false + } + + if o.LdapSettings.ConnectionSecurity == nil { + o.LdapSettings.ConnectionSecurity = new(string) + *o.LdapSettings.ConnectionSecurity = "" + } + + if o.LdapSettings.SkipCertificateVerification == nil { + o.LdapSettings.SkipCertificateVerification = new(bool) + *o.LdapSettings.SkipCertificateVerification = false + } +} + +func (o *Config) IsValid() *AppError { + + if o.ServiceSettings.MaximumLoginAttempts <= 0 { + return NewLocAppError("Config.IsValid", "model.config.is_valid.login_attempts.app_error", nil, "") + } + + if len(o.ServiceSettings.ListenAddress) == 0 { + return NewLocAppError("Config.IsValid", "model.config.is_valid.listen_address.app_error", nil, "") + } + + if o.TeamSettings.MaxUsersPerTeam <= 0 { + return NewLocAppError("Config.IsValid", "model.config.is_valid.max_users.app_error", nil, "") + } + + if len(o.SqlSettings.AtRestEncryptKey) < 32 { + return NewLocAppError("Config.IsValid", "model.config.is_valid.encrypt_sql.app_error", nil, "") + } + + if !(o.SqlSettings.DriverName == DATABASE_DRIVER_MYSQL || o.SqlSettings.DriverName == DATABASE_DRIVER_POSTGRES) { + return NewLocAppError("Config.IsValid", "model.config.is_valid.sql_driver.app_error", nil, "") + } + + if o.SqlSettings.MaxIdleConns <= 0 { + return NewLocAppError("Config.IsValid", "model.config.is_valid.sql_idle.app_error", nil, "") + } + + if len(o.SqlSettings.DataSource) == 0 { + return NewLocAppError("Config.IsValid", "model.config.is_valid.sql_data_src.app_error", nil, "") + } + + if o.SqlSettings.MaxOpenConns <= 0 { + return NewLocAppError("Config.IsValid", "model.config.is_valid.sql_max_conn.app_error", nil, "") + } + + if !(o.FileSettings.DriverName == IMAGE_DRIVER_LOCAL || o.FileSettings.DriverName == IMAGE_DRIVER_S3) { + return NewLocAppError("Config.IsValid", "model.config.is_valid.file_driver.app_error", nil, "") + } + + if o.FileSettings.PreviewHeight < 0 { + return NewLocAppError("Config.IsValid", "model.config.is_valid.file_preview_height.app_error", nil, "") + } + + if o.FileSettings.PreviewWidth <= 0 { + return NewLocAppError("Config.IsValid", "model.config.is_valid.file_preview_width.app_error", nil, "") + } + + if o.FileSettings.ProfileHeight <= 0 { + return NewLocAppError("Config.IsValid", "model.config.is_valid.file_profile_height.app_error", nil, "") + } + + if o.FileSettings.ProfileWidth <= 0 { + return NewLocAppError("Config.IsValid", "model.config.is_valid.file_profile_width.app_error", nil, "") + } + + if o.FileSettings.ThumbnailHeight <= 0 { + return NewLocAppError("Config.IsValid", "model.config.is_valid.file_thumb_height.app_error", nil, "") + } + + if o.FileSettings.ThumbnailWidth <= 0 { + return NewLocAppError("Config.IsValid", "model.config.is_valid.file_thumb_width.app_error", nil, "") + } + + if len(o.FileSettings.PublicLinkSalt) < 32 { + return NewLocAppError("Config.IsValid", "model.config.is_valid.file_salt.app_error", nil, "") + } + + if !(o.EmailSettings.ConnectionSecurity == CONN_SECURITY_NONE || o.EmailSettings.ConnectionSecurity == CONN_SECURITY_TLS || o.EmailSettings.ConnectionSecurity == CONN_SECURITY_STARTTLS) { + return NewLocAppError("Config.IsValid", 
"model.config.is_valid.email_security.app_error", nil, "") + } + + if len(o.EmailSettings.InviteSalt) < 32 { + return NewLocAppError("Config.IsValid", "model.config.is_valid.email_salt.app_error", nil, "") + } + + if len(o.EmailSettings.PasswordResetSalt) < 32 { + return NewLocAppError("Config.IsValid", "model.config.is_valid.email_reset_salt.app_error", nil, "") + } + + if o.RateLimitSettings.MemoryStoreSize <= 0 { + return NewLocAppError("Config.IsValid", "model.config.is_valid.rate_mem.app_error", nil, "") + } + + if o.RateLimitSettings.PerSec <= 0 { + return NewLocAppError("Config.IsValid", "model.config.is_valid.rate_sec.app_error", nil, "") + } + + if !(*o.LdapSettings.ConnectionSecurity == CONN_SECURITY_NONE || *o.LdapSettings.ConnectionSecurity == CONN_SECURITY_TLS || *o.LdapSettings.ConnectionSecurity == CONN_SECURITY_STARTTLS) { + return NewLocAppError("Config.IsValid", "model.config.is_valid.ldap_security.app_error", nil, "") + } + + return nil +} + +func (me *Config) GetSanitizeOptions() map[string]bool { + options := map[string]bool{} + options["fullname"] = me.PrivacySettings.ShowFullName + options["email"] = me.PrivacySettings.ShowEmailAddress + + return options +} diff --git a/vendor/github.com/mattermost/platform/model/file.go b/vendor/github.com/mattermost/platform/model/file.go new file mode 100644 index 00000000..b7806b3b --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/file.go @@ -0,0 +1,43 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +const ( + MAX_FILE_SIZE = 50000000 // 50 MB +) + +var ( + IMAGE_EXTENSIONS = [5]string{".jpg", ".jpeg", ".gif", ".bmp", ".png"} + IMAGE_MIME_TYPES = map[string]string{".jpg": "image/jpeg", ".jpeg": "image/jpeg", ".gif": "image/gif", ".bmp": "image/bmp", ".png": "image/png", ".tiff": "image/tiff"} +) + +type FileUploadResponse struct { + Filenames []string `json:"filenames"` + ClientIds []string `json:"client_ids"` +} + +func FileUploadResponseFromJson(data io.Reader) *FileUploadResponse { + decoder := json.NewDecoder(data) + var o FileUploadResponse + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} + +func (o *FileUploadResponse) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} diff --git a/vendor/github.com/mattermost/platform/model/file_info.go b/vendor/github.com/mattermost/platform/model/file_info.go new file mode 100644 index 00000000..f785042b --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/file_info.go @@ -0,0 +1,77 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "bytes" + "encoding/json" + "image/gif" + "io" + "mime" + "path/filepath" +) + +type FileInfo struct { + Filename string `json:"filename"` + Size int `json:"size"` + Extension string `json:"extension"` + MimeType string `json:"mime_type"` + HasPreviewImage bool `json:"has_preview_image"` +} + +func GetInfoForBytes(filename string, data []byte) (*FileInfo, *AppError) { + size := len(data) + + var mimeType string + extension := filepath.Ext(filename) + isImage := IsFileExtImage(extension) + if isImage { + mimeType = GetImageMimeType(extension) + } else { + mimeType = mime.TypeByExtension(extension) + } + + if extension != "" && extension[0] == '.' 
{ + // the client expects a file extension without the leading period + extension = extension[1:] + } + + hasPreviewImage := isImage + if mimeType == "image/gif" { + // just show the gif itself instead of a preview image for animated gifs + if gifImage, err := gif.DecodeAll(bytes.NewReader(data)); err != nil { + return nil, NewLocAppError("GetInfoForBytes", "model.file_info.get.gif.app_error", nil, "filename="+filename) + } else { + hasPreviewImage = len(gifImage.Image) == 1 + } + } + + return &FileInfo{ + Filename: filename, + Size: size, + Extension: extension, + MimeType: mimeType, + HasPreviewImage: hasPreviewImage, + }, nil +} + +func (info *FileInfo) ToJson() string { + b, err := json.Marshal(info) + if err != nil { + return "" + } else { + return string(b) + } +} + +func FileInfoFromJson(data io.Reader) *FileInfo { + decoder := json.NewDecoder(data) + + var info FileInfo + if err := decoder.Decode(&info); err != nil { + return nil + } else { + return &info + } +} diff --git a/vendor/github.com/mattermost/platform/model/gitlab/gitlab.go b/vendor/github.com/mattermost/platform/model/gitlab/gitlab.go new file mode 100644 index 00000000..3ca49997 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/gitlab/gitlab.go @@ -0,0 +1,107 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package oauthgitlab + +import ( + "encoding/json" + "github.com/mattermost/platform/einterfaces" + "github.com/mattermost/platform/model" + "io" + "strconv" + "strings" +) + +const ( + USER_AUTH_SERVICE_GITLAB = "gitlab" +) + +type GitLabProvider struct { +} + +type GitLabUser struct { + Id int64 `json:"id"` + Username string `json:"username"` + Login string `json:"login"` + Email string `json:"email"` + Name string `json:"name"` +} + +func init() { + provider := &GitLabProvider{} + einterfaces.RegisterOauthProvider(USER_AUTH_SERVICE_GITLAB, provider) +} + +func userFromGitLabUser(glu *GitLabUser) *model.User { + user := &model.User{} + username := glu.Username + if username == "" { + username = glu.Login + } + user.Username = model.CleanUsername(username) + splitName := strings.Split(glu.Name, " ") + if len(splitName) == 2 { + user.FirstName = splitName[0] + user.LastName = splitName[1] + } else if len(splitName) >= 2 { + user.FirstName = splitName[0] + user.LastName = strings.Join(splitName[1:], " ") + } else { + user.FirstName = glu.Name + } + user.Email = glu.Email + user.AuthData = strconv.FormatInt(glu.Id, 10) + user.AuthService = USER_AUTH_SERVICE_GITLAB + + return user +} + +func gitLabUserFromJson(data io.Reader) *GitLabUser { + decoder := json.NewDecoder(data) + var glu GitLabUser + err := decoder.Decode(&glu) + if err == nil { + return &glu + } else { + return nil + } +} + +func (glu *GitLabUser) IsValid() bool { + if glu.Id == 0 { + return false + } + + if len(glu.Email) == 0 { + return false + } + + return true +} + +func (glu *GitLabUser) getAuthData() string { + return strconv.FormatInt(glu.Id, 10) +} + +func (m *GitLabProvider) GetIdentifier() string { + return USER_AUTH_SERVICE_GITLAB +} + +func (m *GitLabProvider) GetUserFromJson(data io.Reader) *model.User { + glu := gitLabUserFromJson(data) + if glu.IsValid() { + return userFromGitLabUser(glu) + } + + return &model.User{} +} + +func (m *GitLabProvider) GetAuthDataFromJson(data io.Reader) string { + glu := gitLabUserFromJson(data) + + if glu.IsValid() { + return glu.getAuthData() + } + + return "" +} diff --git 
a/vendor/github.com/mattermost/platform/model/incoming_webhook.go b/vendor/github.com/mattermost/platform/model/incoming_webhook.go new file mode 100644 index 00000000..0763b443 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/incoming_webhook.go @@ -0,0 +1,137 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +const ( + DEFAULT_WEBHOOK_USERNAME = "webhook" + DEFAULT_WEBHOOK_ICON = "/static/images/webhook_icon.jpg" +) + +type IncomingWebhook struct { + Id string `json:"id"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + UserId string `json:"user_id"` + ChannelId string `json:"channel_id"` + TeamId string `json:"team_id"` + DisplayName string `json:"display_name"` + Description string `json:"description"` +} + +type IncomingWebhookRequest struct { + Text string `json:"text"` + Username string `json:"username"` + IconURL string `json:"icon_url"` + ChannelName string `json:"channel"` + Props StringInterface `json:"props"` + Attachments interface{} `json:"attachments"` + Type string `json:"type"` +} + +func (o *IncomingWebhook) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func IncomingWebhookFromJson(data io.Reader) *IncomingWebhook { + decoder := json.NewDecoder(data) + var o IncomingWebhook + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} + +func IncomingWebhookListToJson(l []*IncomingWebhook) string { + b, err := json.Marshal(l) + if err != nil { + return "" + } else { + return string(b) + } +} + +func IncomingWebhookListFromJson(data io.Reader) []*IncomingWebhook { + decoder := json.NewDecoder(data) + var o []*IncomingWebhook + err := decoder.Decode(&o) + if err == nil { + return o + } else { + return nil + } +} + +func (o *IncomingWebhook) IsValid() *AppError { + + if len(o.Id) != 26 { + return NewLocAppError("IncomingWebhook.IsValid", "model.incoming_hook.id.app_error", nil, "") + } + + if o.CreateAt == 0 { + return NewLocAppError("IncomingWebhook.IsValid", "model.incoming_hook.create_at.app_error", nil, "id="+o.Id) + } + + if o.UpdateAt == 0 { + return NewLocAppError("IncomingWebhook.IsValid", "model.incoming_hook.update_at.app_error", nil, "id="+o.Id) + } + + if len(o.UserId) != 26 { + return NewLocAppError("IncomingWebhook.IsValid", "model.incoming_hook.user_id.app_error", nil, "") + } + + if len(o.ChannelId) != 26 { + return NewLocAppError("IncomingWebhook.IsValid", "model.incoming_hook.channel_id.app_error", nil, "") + } + + if len(o.TeamId) != 26 { + return NewLocAppError("IncomingWebhook.IsValid", "model.incoming_hook.team_id.app_error", nil, "") + } + + if len(o.DisplayName) > 64 { + return NewLocAppError("IncomingWebhook.IsValid", "model.incoming_hook.display_name.app_error", nil, "") + } + + if len(o.Description) > 128 { + return NewLocAppError("IncomingWebhook.IsValid", "model.incoming_hook.description.app_error", nil, "") + } + + return nil +} + +func (o *IncomingWebhook) PreSave() { + if o.Id == "" { + o.Id = NewId() + } + + o.CreateAt = GetMillis() + o.UpdateAt = o.CreateAt +} + +func (o *IncomingWebhook) PreUpdate() { + o.UpdateAt = GetMillis() +} + +func IncomingWebhookRequestFromJson(data io.Reader) *IncomingWebhookRequest { + decoder := json.NewDecoder(data) + var o IncomingWebhookRequest + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil 
+ } +} diff --git a/vendor/github.com/mattermost/platform/model/ldap.go b/vendor/github.com/mattermost/platform/model/ldap.go new file mode 100644 index 00000000..5fde06a6 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/ldap.go @@ -0,0 +1,8 @@ +// Copyright (c) 2016 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +const ( + USER_AUTH_SERVICE_LDAP = "ldap" +) diff --git a/vendor/github.com/mattermost/platform/model/license.go b/vendor/github.com/mattermost/platform/model/license.go new file mode 100644 index 00000000..cab22a68 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/license.go @@ -0,0 +1,123 @@ +// Copyright (c) 2016 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type LicenseRecord struct { + Id string `json:"id"` + CreateAt int64 `json:"create_at"` + Bytes string `json:"-"` +} + +type License struct { + Id string `json:"id"` + IssuedAt int64 `json:"issued_at"` + StartsAt int64 `json:"starts_at"` + ExpiresAt int64 `json:"expires_at"` + Customer *Customer `json:"customer"` + Features *Features `json:"features"` +} + +type Customer struct { + Id string `json:"id"` + Name string `json:"name"` + Email string `json:"email"` + Company string `json:"company"` + PhoneNumber string `json:"phone_number"` +} + +type Features struct { + Users *int `json:"users"` + LDAP *bool `json:"ldap"` + MFA *bool `json:"mfa"` + GoogleSSO *bool `json:"google_sso"` + Compliance *bool `json:"compliance"` +} + +func (f *Features) SetDefaults() { + if f.Users == nil { + f.Users = new(int) + *f.Users = 0 + } + + if f.LDAP == nil { + f.LDAP = new(bool) + *f.LDAP = true + } + + if f.MFA == nil { + f.MFA = new(bool) + *f.MFA = true + } + + if f.GoogleSSO == nil { + f.GoogleSSO = new(bool) + *f.GoogleSSO = true + } + + if f.Compliance == nil { + f.Compliance = new(bool) + *f.Compliance = true + } +} + +func (l *License) IsExpired() bool { + now := GetMillis() + if l.ExpiresAt < now { + return true + } + return false +} + +func (l *License) IsStarted() bool { + now := GetMillis() + if l.StartsAt < now { + return true + } + return false +} + +func (l *License) ToJson() string { + b, err := json.Marshal(l) + if err != nil { + return "" + } else { + return string(b) + } +} + +func LicenseFromJson(data io.Reader) *License { + decoder := json.NewDecoder(data) + var o License + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} + +func (lr *LicenseRecord) IsValid() *AppError { + if len(lr.Id) != 26 { + return NewLocAppError("LicenseRecord.IsValid", "model.license_record.is_valid.id.app_error", nil, "") + } + + if lr.CreateAt == 0 { + return NewLocAppError("LicenseRecord.IsValid", "model.license_record.is_valid.create_at.app_error", nil, "") + } + + if len(lr.Bytes) == 0 || len(lr.Bytes) > 10000 { + return NewLocAppError("LicenseRecord.IsValid", "model.license_record.is_valid.create_at.app_error", nil, "") + } + + return nil +} + +func (lr *LicenseRecord) PreSave() { + lr.CreateAt = GetMillis() +} diff --git a/vendor/github.com/mattermost/platform/model/message.go b/vendor/github.com/mattermost/platform/model/message.go new file mode 100644 index 00000000..cce0ec09 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/message.go @@ -0,0 +1,58 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package model + +import ( + "encoding/json" + "io" +) + +const ( + ACTION_TYPING = "typing" + ACTION_POSTED = "posted" + ACTION_POST_EDITED = "post_edited" + ACTION_POST_DELETED = "post_deleted" + ACTION_CHANNEL_VIEWED = "channel_viewed" + ACTION_NEW_USER = "new_user" + ACTION_USER_ADDED = "user_added" + ACTION_USER_REMOVED = "user_removed" + ACTION_PREFERENCE_CHANGED = "preference_changed" + ACTION_EPHEMERAL_MESSAGE = "ephemeral_message" +) + +type Message struct { + TeamId string `json:"team_id"` + ChannelId string `json:"channel_id"` + UserId string `json:"user_id"` + Action string `json:"action"` + Props map[string]string `json:"props"` +} + +func (m *Message) Add(key string, value string) { + m.Props[key] = value +} + +func NewMessage(teamId string, channelId string, userId string, action string) *Message { + return &Message{TeamId: teamId, ChannelId: channelId, UserId: userId, Action: action, Props: make(map[string]string)} +} + +func (o *Message) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func MessageFromJson(data io.Reader) *Message { + decoder := json.NewDecoder(data) + var o Message + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/oauth.go b/vendor/github.com/mattermost/platform/model/oauth.go new file mode 100644 index 00000000..c54df107 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/oauth.go @@ -0,0 +1,159 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "fmt" + "io" + "unicode/utf8" +) + +const ( + OAUTH_ACTION_SIGNUP = "signup" + OAUTH_ACTION_LOGIN = "login" + OAUTH_ACTION_EMAIL_TO_SSO = "email_to_sso" + OAUTH_ACTION_SSO_TO_EMAIL = "sso_to_email" +) + +type OAuthApp struct { + Id string `json:"id"` + CreatorId string `json:"creator_id"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + ClientSecret string `json:"client_secret"` + Name string `json:"name"` + Description string `json:"description"` + CallbackUrls StringArray `json:"callback_urls"` + Homepage string `json:"homepage"` +} + +// IsValid validates the app and returns an error if it isn't configured +// correctly. 
+func (a *OAuthApp) IsValid() *AppError { + + if len(a.Id) != 26 { + return NewLocAppError("OAuthApp.IsValid", "model.oauth.is_valid.app_id.app_error", nil, "") + } + + if a.CreateAt == 0 { + return NewLocAppError("OAuthApp.IsValid", "model.oauth.is_valid.create_at.app_error", nil, "app_id="+a.Id) + } + + if a.UpdateAt == 0 { + return NewLocAppError("OAuthApp.IsValid", "model.oauth.is_valid.update_at.app_error", nil, "app_id="+a.Id) + } + + if len(a.CreatorId) != 26 { + return NewLocAppError("OAuthApp.IsValid", "model.oauth.is_valid.creator_id.app_error", nil, "app_id="+a.Id) + } + + if len(a.ClientSecret) == 0 || len(a.ClientSecret) > 128 { + return NewLocAppError("OAuthApp.IsValid", "model.oauth.is_valid.client_secret.app_error", nil, "app_id="+a.Id) + } + + if len(a.Name) == 0 || len(a.Name) > 64 { + return NewLocAppError("OAuthApp.IsValid", "model.oauth.is_valid.name.app_error", nil, "app_id="+a.Id) + } + + if len(a.CallbackUrls) == 0 || len(fmt.Sprintf("%s", a.CallbackUrls)) > 1024 { + return NewLocAppError("OAuthApp.IsValid", "model.oauth.is_valid.callback.app_error", nil, "app_id="+a.Id) + } + + if len(a.Homepage) == 0 || len(a.Homepage) > 256 { + return NewLocAppError("OAuthApp.IsValid", "model.oauth.is_valid.homepage.app_error", nil, "app_id="+a.Id) + } + + if utf8.RuneCountInString(a.Description) > 512 { + return NewLocAppError("OAuthApp.IsValid", "model.oauth.is_valid.description.app_error", nil, "app_id="+a.Id) + } + + return nil +} + +// PreSave will set the Id and ClientSecret if missing. It will also fill +// in the CreateAt, UpdateAt times. It should be run before saving the app to the db. +func (a *OAuthApp) PreSave() { + if a.Id == "" { + a.Id = NewId() + } + + if a.ClientSecret == "" { + a.ClientSecret = NewId() + } + + a.CreateAt = GetMillis() + a.UpdateAt = a.CreateAt + + if len(a.ClientSecret) > 0 { + a.ClientSecret = HashPassword(a.ClientSecret) + } +} + +// PreUpdate should be run before updating the app in the db. 
+func (a *OAuthApp) PreUpdate() { + a.UpdateAt = GetMillis() +} + +// ToJson convert a User to a json string +func (a *OAuthApp) ToJson() string { + b, err := json.Marshal(a) + if err != nil { + return "" + } else { + return string(b) + } +} + +// Generate a valid strong etag so the browser can cache the results +func (a *OAuthApp) Etag() string { + return Etag(a.Id, a.UpdateAt) +} + +// Remove any private data from the app object +func (a *OAuthApp) Sanitize() { + a.ClientSecret = "" +} + +func (a *OAuthApp) IsValidRedirectURL(url string) bool { + for _, u := range a.CallbackUrls { + if u == url { + return true + } + } + + return false +} + +// OAuthAppFromJson will decode the input and return a User +func OAuthAppFromJson(data io.Reader) *OAuthApp { + decoder := json.NewDecoder(data) + var app OAuthApp + err := decoder.Decode(&app) + if err == nil { + return &app + } else { + return nil + } +} + +func OAuthAppMapToJson(a map[string]*OAuthApp) string { + b, err := json.Marshal(a) + if err != nil { + return "" + } else { + return string(b) + } +} + +func OAuthAppMapFromJson(data io.Reader) map[string]*OAuthApp { + decoder := json.NewDecoder(data) + var apps map[string]*OAuthApp + err := decoder.Decode(&apps) + if err == nil { + return apps + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/outgoing_webhook.go b/vendor/github.com/mattermost/platform/model/outgoing_webhook.go new file mode 100644 index 00000000..e13de908 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/outgoing_webhook.go @@ -0,0 +1,151 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "fmt" + "io" +) + +type OutgoingWebhook struct { + Id string `json:"id"` + Token string `json:"token"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + CreatorId string `json:"creator_id"` + ChannelId string `json:"channel_id"` + TeamId string `json:"team_id"` + TriggerWords StringArray `json:"trigger_words"` + CallbackURLs StringArray `json:"callback_urls"` + DisplayName string `json:"display_name"` + Description string `json:"description"` +} + +func (o *OutgoingWebhook) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func OutgoingWebhookFromJson(data io.Reader) *OutgoingWebhook { + decoder := json.NewDecoder(data) + var o OutgoingWebhook + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} + +func OutgoingWebhookListToJson(l []*OutgoingWebhook) string { + b, err := json.Marshal(l) + if err != nil { + return "" + } else { + return string(b) + } +} + +func OutgoingWebhookListFromJson(data io.Reader) []*OutgoingWebhook { + decoder := json.NewDecoder(data) + var o []*OutgoingWebhook + err := decoder.Decode(&o) + if err == nil { + return o + } else { + return nil + } +} + +func (o *OutgoingWebhook) IsValid() *AppError { + + if len(o.Id) != 26 { + return NewLocAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.id.app_error", nil, "") + } + + if len(o.Token) != 26 { + return NewLocAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.token.app_error", nil, "") + } + + if o.CreateAt == 0 { + return NewLocAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.create_at.app_error", nil, "id="+o.Id) + } + + if o.UpdateAt == 0 { + return NewLocAppError("OutgoingWebhook.IsValid", 
"model.outgoing_hook.is_valid.update_at.app_error", nil, "id="+o.Id) + } + + if len(o.CreatorId) != 26 { + return NewLocAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.user_id.app_error", nil, "") + } + + if len(o.ChannelId) != 0 && len(o.ChannelId) != 26 { + return NewLocAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.channel_id.app_error", nil, "") + } + + if len(o.TeamId) != 26 { + return NewLocAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.team_id.app_error", nil, "") + } + + if len(fmt.Sprintf("%s", o.TriggerWords)) > 1024 { + return NewLocAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.words.app_error", nil, "") + } + + if len(o.CallbackURLs) == 0 || len(fmt.Sprintf("%s", o.CallbackURLs)) > 1024 { + return NewLocAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.callback.app_error", nil, "") + } + + for _, callback := range o.CallbackURLs { + if !IsValidHttpUrl(callback) { + return NewLocAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.url.app_error", nil, "") + } + } + + if len(o.DisplayName) > 64 { + return NewLocAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.display_name.app_error", nil, "") + } + + if len(o.Description) > 128 { + return NewLocAppError("OutgoingWebhook.IsValid", "model.outgoing_hook.is_valid.description.app_error", nil, "") + } + + return nil +} + +func (o *OutgoingWebhook) PreSave() { + if o.Id == "" { + o.Id = NewId() + } + + if o.Token == "" { + o.Token = NewId() + } + + o.CreateAt = GetMillis() + o.UpdateAt = o.CreateAt +} + +func (o *OutgoingWebhook) PreUpdate() { + o.UpdateAt = GetMillis() +} + +func (o *OutgoingWebhook) HasTriggerWord(word string) bool { + if len(o.TriggerWords) == 0 || len(word) == 0 { + return false + } + + for _, trigger := range o.TriggerWords { + if trigger == word { + return true + } + } + + return false +} diff --git a/vendor/github.com/mattermost/platform/model/post.go b/vendor/github.com/mattermost/platform/model/post.go new file mode 100644 index 00000000..8a451831 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/post.go @@ -0,0 +1,169 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package model + +import ( + "encoding/json" + "io" + "unicode/utf8" +) + +const ( + POST_SYSTEM_MESSAGE_PREFIX = "system_" + POST_DEFAULT = "" + POST_SLACK_ATTACHMENT = "slack_attachment" + POST_SYSTEM_GENERIC = "system_generic" + POST_JOIN_LEAVE = "system_join_leave" + POST_HEADER_CHANGE = "system_header_change" + POST_EPHEMERAL = "system_ephemeral" +) + +type Post struct { + Id string `json:"id"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + UserId string `json:"user_id"` + ChannelId string `json:"channel_id"` + RootId string `json:"root_id"` + ParentId string `json:"parent_id"` + OriginalId string `json:"original_id"` + Message string `json:"message"` + Type string `json:"type"` + Props StringInterface `json:"props"` + Hashtags string `json:"hashtags"` + Filenames StringArray `json:"filenames"` + PendingPostId string `json:"pending_post_id" db:"-"` +} + +func (o *Post) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func PostFromJson(data io.Reader) *Post { + decoder := json.NewDecoder(data) + var o Post + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} + +func (o *Post) Etag() string { + return Etag(o.Id, o.UpdateAt) +} + +func (o *Post) IsValid() *AppError { + + if len(o.Id) != 26 { + return NewLocAppError("Post.IsValid", "model.post.is_valid.id.app_error", nil, "") + } + + if o.CreateAt == 0 { + return NewLocAppError("Post.IsValid", "model.post.is_valid.create_at.app_error", nil, "id="+o.Id) + } + + if o.UpdateAt == 0 { + return NewLocAppError("Post.IsValid", "model.post.is_valid.update_at.app_error", nil, "id="+o.Id) + } + + if len(o.UserId) != 26 { + return NewLocAppError("Post.IsValid", "model.post.is_valid.user_id.app_error", nil, "") + } + + if len(o.ChannelId) != 26 { + return NewLocAppError("Post.IsValid", "model.post.is_valid.channel_id.app_error", nil, "") + } + + if !(len(o.RootId) == 26 || len(o.RootId) == 0) { + return NewLocAppError("Post.IsValid", "model.post.is_valid.root_id.app_error", nil, "") + } + + if !(len(o.ParentId) == 26 || len(o.ParentId) == 0) { + return NewLocAppError("Post.IsValid", "model.post.is_valid.parent_id.app_error", nil, "") + } + + if len(o.ParentId) == 26 && len(o.RootId) == 0 { + return NewLocAppError("Post.IsValid", "model.post.is_valid.root_parent.app_error", nil, "") + } + + if !(len(o.OriginalId) == 26 || len(o.OriginalId) == 0) { + return NewLocAppError("Post.IsValid", "model.post.is_valid.original_id.app_error", nil, "") + } + + if utf8.RuneCountInString(o.Message) > 4000 { + return NewLocAppError("Post.IsValid", "model.post.is_valid.msg.app_error", nil, "id="+o.Id) + } + + if utf8.RuneCountInString(o.Hashtags) > 1000 { + return NewLocAppError("Post.IsValid", "model.post.is_valid.hashtags.app_error", nil, "id="+o.Id) + } + + // should be removed once more message types are supported + if !(o.Type == POST_DEFAULT || o.Type == POST_JOIN_LEAVE || o.Type == POST_SLACK_ATTACHMENT || o.Type == POST_HEADER_CHANGE) { + return NewLocAppError("Post.IsValid", "model.post.is_valid.type.app_error", nil, "id="+o.Type) + } + + if utf8.RuneCountInString(ArrayToJson(o.Filenames)) > 4000 { + return NewLocAppError("Post.IsValid", "model.post.is_valid.filenames.app_error", nil, "id="+o.Id) + } + + if utf8.RuneCountInString(StringInterfaceToJson(o.Props)) > 8000 { + return NewLocAppError("Post.IsValid", "model.post.is_valid.props.app_error", nil, "id="+o.Id) + } + + return nil +} + +func 
(o *Post) PreSave() { + if o.Id == "" { + o.Id = NewId() + } + + o.OriginalId = "" + + if o.CreateAt == 0 { + o.CreateAt = GetMillis() + } + + o.UpdateAt = o.CreateAt + + if o.Props == nil { + o.Props = make(map[string]interface{}) + } + + if o.Filenames == nil { + o.Filenames = []string{} + } +} + +func (o *Post) MakeNonNil() { + if o.Props == nil { + o.Props = make(map[string]interface{}) + } + if o.Filenames == nil { + o.Filenames = []string{} + } +} + +func (o *Post) AddProp(key string, value interface{}) { + + o.MakeNonNil() + + o.Props[key] = value +} + +func (o *Post) PreExport() { +} + +func (o *Post) IsSystemMessage() bool { + return len(o.Type) >= len(POST_SYSTEM_MESSAGE_PREFIX) && o.Type[:len(POST_SYSTEM_MESSAGE_PREFIX)] == POST_SYSTEM_MESSAGE_PREFIX +} diff --git a/vendor/github.com/mattermost/platform/model/post_list.go b/vendor/github.com/mattermost/platform/model/post_list.go new file mode 100644 index 00000000..4c0f5408 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/post_list.go @@ -0,0 +1,100 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type PostList struct { + Order []string `json:"order"` + Posts map[string]*Post `json:"posts"` +} + +func (o *PostList) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func (o *PostList) MakeNonNil() { + if o.Order == nil { + o.Order = make([]string, 0) + } + + if o.Posts == nil { + o.Posts = make(map[string]*Post) + } + + for _, v := range o.Posts { + v.MakeNonNil() + } +} + +func (o *PostList) AddOrder(id string) { + + if o.Order == nil { + o.Order = make([]string, 0, 128) + } + + o.Order = append(o.Order, id) +} + +func (o *PostList) AddPost(post *Post) { + + if o.Posts == nil { + o.Posts = make(map[string]*Post) + } + + o.Posts[post.Id] = post +} + +func (o *PostList) Extend(other *PostList) { + for _, postId := range other.Order { + if _, ok := o.Posts[postId]; !ok { + o.AddPost(other.Posts[postId]) + o.AddOrder(postId) + } + } +} + +func (o *PostList) Etag() string { + + id := "0" + var t int64 = 0 + + for _, v := range o.Posts { + if v.UpdateAt > t { + t = v.UpdateAt + id = v.Id + } + } + + return Etag(id, t) +} + +func (o *PostList) IsChannelId(channelId string) bool { + for _, v := range o.Posts { + if v.ChannelId != channelId { + return false + } + } + + return true +} + +func PostListFromJson(data io.Reader) *PostList { + decoder := json.NewDecoder(data) + var o PostList + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/preference.go b/vendor/github.com/mattermost/platform/model/preference.go new file mode 100644 index 00000000..b2ec9310 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/preference.go @@ -0,0 +1,66 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
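preference.go, next, stores one per-user key/value setting addressed by (UserId, Category, Name). A short sketch under the category constants defined below; the user id is a random placeholder:

package main

import (
	"fmt"

	"github.com/mattermost/platform/model"
)

func main() {
	p := model.Preference{
		UserId:   model.NewId(), // hypothetical user
		Category: model.PREFERENCE_CATEGORY_DIRECT_CHANNEL_SHOW,
		Name:     model.NewId(), // for this category, the other user's id
		Value:    "true",
	}
	if err := p.IsValid(); err != nil {
		fmt.Println(err)
	}
	fmt.Println(p.ToJson())
}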
+ +package model + +import ( + "encoding/json" + "io" + "unicode/utf8" +) + +const ( + PREFERENCE_CATEGORY_DIRECT_CHANNEL_SHOW = "direct_channel_show" + PREFERENCE_CATEGORY_TUTORIAL_STEPS = "tutorial_step" + PREFERENCE_CATEGORY_ADVANCED_SETTINGS = "advanced_settings" + + PREFERENCE_CATEGORY_LAST = "last" + PREFERENCE_NAME_LAST_CHANNEL = "channel" +) + +type Preference struct { + UserId string `json:"user_id"` + Category string `json:"category"` + Name string `json:"name"` + Value string `json:"value"` +} + +func (o *Preference) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func PreferenceFromJson(data io.Reader) *Preference { + decoder := json.NewDecoder(data) + var o Preference + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} + +func (o *Preference) IsValid() *AppError { + if len(o.UserId) != 26 { + return NewLocAppError("Preference.IsValid", "model.preference.is_valid.id.app_error", nil, "user_id="+o.UserId) + } + + if len(o.Category) == 0 || len(o.Category) > 32 { + return NewLocAppError("Preference.IsValid", "model.preference.is_valid.category.app_error", nil, "category="+o.Category) + } + + if len(o.Name) == 0 || len(o.Name) > 32 { + return NewLocAppError("Preference.IsValid", "model.preference.is_valid.name.app_error", nil, "name="+o.Name) + } + + if utf8.RuneCountInString(o.Value) > 128 { + return NewLocAppError("Preference.IsValid", "model.preference.is_valid.value.app_error", nil, "value="+o.Value) + } + + return nil +} diff --git a/vendor/github.com/mattermost/platform/model/preferences.go b/vendor/github.com/mattermost/platform/model/preferences.go new file mode 100644 index 00000000..f11b5fd8 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/preferences.go @@ -0,0 +1,31 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type Preferences []Preference + +func (o *Preferences) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func PreferencesFromJson(data io.Reader) (Preferences, error) { + decoder := json.NewDecoder(data) + var o Preferences + err := decoder.Decode(&o) + if err == nil { + return o, nil + } else { + return nil, err + } +} diff --git a/vendor/github.com/mattermost/platform/model/push_notification.go b/vendor/github.com/mattermost/platform/model/push_notification.go new file mode 100644 index 00000000..9196a44d --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/push_notification.go @@ -0,0 +1,49 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
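push_notification.go, beginning here, is the payload handed to the push proxy. A hedged construction sketch; the server id and device token are illustrative values only:

package main

import (
	"fmt"

	"github.com/mattermost/platform/model"
)

func main() {
	pn := model.PushNotification{
		Platform: model.PUSH_NOTIFY_APPLE,
		ServerId: model.NewId(),  // hypothetical diagnostic id
		DeviceId: "device-token", // hypothetical APNs token
		Category: model.CATEGORY_DM,
		Badge:    1,
		Message:  "alice: hello",
	}
	fmt.Println(pn.ToJson())
}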
+ +package model + +import ( + "encoding/json" + "io" +) + +const ( + PUSH_NOTIFY_APPLE = "apple" + PUSH_NOTIFY_ANDROID = "android" + + CATEGORY_DM = "DIRECT_MESSAGE" +) + +type PushNotification struct { + Platform string `json:"platform"` + ServerId string `json:"server_id"` + DeviceId string `json:"device_id"` + Category string `json:"category"` + Sound string `json:"sound"` + Message string `json:"message"` + Badge int `json:"badge"` + ContentAvailable int `json:"cont_ava"` + ChannelId string `json:"channel_id"` + ChannelName string `json:"channel_name"` +} + +func (me *PushNotification) ToJson() string { + b, err := json.Marshal(me) + if err != nil { + return "" + } else { + return string(b) + } +} + +func PushNotificationFromJson(data io.Reader) *PushNotification { + decoder := json.NewDecoder(data) + var me PushNotification + err := decoder.Decode(&me) + if err == nil { + return &me + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/search_params.go b/vendor/github.com/mattermost/platform/model/search_params.go new file mode 100644 index 00000000..d3178269 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/search_params.go @@ -0,0 +1,170 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "strings" +) + +type SearchParams struct { + Terms string + IsHashtag bool + InChannels []string + FromUsers []string +} + +var searchFlags = [...]string{"from", "channel", "in"} + +func splitWordsNoQuotes(text string) []string { + words := []string{} + + for _, word := range strings.Fields(text) { + words = append(words, word) + } + + return words +} + +func splitWords(text string) []string { + words := []string{} + + foundQuote := false + location := 0 + for i, char := range text { + if char == '"' { + if foundQuote { + // Grab the quoted section + word := text[location : i+1] + words = append(words, word) + foundQuote = false + location = i + 1 + } else { + words = append(words, splitWordsNoQuotes(text[location:i])...) + foundQuote = true + location = i + } + } + } + + words = append(words, splitWordsNoQuotes(text[location:])...) 
+ + return words +} + +func parseSearchFlags(input []string) ([]string, [][2]string) { + words := []string{} + flags := [][2]string{} + + skipNextWord := false + for i, word := range input { + if skipNextWord { + skipNextWord = false + continue + } + + isFlag := false + + if colon := strings.Index(word, ":"); colon != -1 { + flag := word[:colon] + value := word[colon+1:] + + for _, searchFlag := range searchFlags { + // check for case insensitive equality + if strings.EqualFold(flag, searchFlag) { + if value != "" { + flags = append(flags, [2]string{searchFlag, value}) + isFlag = true + } else if i < len(input)-1 { + flags = append(flags, [2]string{searchFlag, input[i+1]}) + skipNextWord = true + isFlag = true + } + + if isFlag { + break + } + } + } + } + + if !isFlag { + // trim off surrounding punctuation (note that we leave trailing asterisks to allow wildcards) + word = puncStart.ReplaceAllString(word, "") + word = puncEndWildcard.ReplaceAllString(word, "") + + // and remove extra pound #s + word = hashtagStart.ReplaceAllString(word, "#") + + if len(word) != 0 { + words = append(words, word) + } + } + } + + return words, flags +} + +func ParseSearchParams(text string) []*SearchParams { + words, flags := parseSearchFlags(splitWords(text)) + + hashtagTermList := []string{} + plainTermList := []string{} + + for _, word := range words { + if validHashtag.MatchString(word) { + hashtagTermList = append(hashtagTermList, word) + } else { + plainTermList = append(plainTermList, word) + } + } + + hashtagTerms := strings.Join(hashtagTermList, " ") + plainTerms := strings.Join(plainTermList, " ") + + inChannels := []string{} + fromUsers := []string{} + + for _, flagPair := range flags { + flag := flagPair[0] + value := flagPair[1] + + if flag == "in" || flag == "channel" { + inChannels = append(inChannels, value) + } else if flag == "from" { + fromUsers = append(fromUsers, value) + } + } + + paramsList := []*SearchParams{} + + if len(plainTerms) > 0 { + paramsList = append(paramsList, &SearchParams{ + Terms: plainTerms, + IsHashtag: false, + InChannels: inChannels, + FromUsers: fromUsers, + }) + } + + if len(hashtagTerms) > 0 { + paramsList = append(paramsList, &SearchParams{ + Terms: hashtagTerms, + IsHashtag: true, + InChannels: inChannels, + FromUsers: fromUsers, + }) + } + + // special case for when no terms are specified but we still have a filter + if len(plainTerms) == 0 && len(hashtagTerms) == 0 && (len(inChannels) != 0 || len(fromUsers) != 0) { + paramsList = append(paramsList, &SearchParams{ + Terms: "", + IsHashtag: true, + InChannels: inChannels, + FromUsers: fromUsers, + }) + } + + return paramsList +} diff --git a/vendor/github.com/mattermost/platform/model/security_bulletin.go b/vendor/github.com/mattermost/platform/model/security_bulletin.go new file mode 100644 index 00000000..8d9be6d3 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/security_bulletin.go @@ -0,0 +1,55 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
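Stepping back to search_params.go just above: ParseSearchParams tokenizes a raw query (respecting quoted phrases), pulls out from:/channel:/in: flags, and splits the remaining words into one plain-term SearchParams and one hashtag SearchParams, each carrying the channel and user filters. A usage sketch with illustrative names; exact term trimming depends on the punctuation and hashtag regexps defined in utils.go:

package main

import (
	"fmt"

	"github.com/mattermost/platform/model"
)

func main() {
	// Expect one plain-term entry and one hashtag entry, both filtered to
	// channel "town-square" and user "alice".
	for _, p := range model.ParseSearchParams(`in:town-square from:alice "release notes" #launch`) {
		fmt.Printf("terms=%q hashtag=%v in=%v from=%v\n",
			p.Terms, p.IsHashtag, p.InChannels, p.FromUsers)
	}
}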
+ +package model + +import ( + "encoding/json" + "io" +) + +type SecurityBulletin struct { + Id string `json:"id"` + AppliesToVersion string `json:"applies_to_version"` +} + +type SecurityBulletins []SecurityBulletin + +func (me *SecurityBulletin) ToJson() string { + b, err := json.Marshal(me) + if err != nil { + return "" + } else { + return string(b) + } +} + +func SecurityBulletinFromJson(data io.Reader) *SecurityBulletin { + decoder := json.NewDecoder(data) + var o SecurityBulletin + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} + +func (me SecurityBulletins) ToJson() string { + if b, err := json.Marshal(me); err != nil { + return "[]" + } else { + return string(b) + } +} + +func SecurityBulletinsFromJson(data io.Reader) SecurityBulletins { + decoder := json.NewDecoder(data) + var o SecurityBulletins + err := decoder.Decode(&o) + if err == nil { + return o + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/session.go b/vendor/github.com/mattermost/platform/model/session.go new file mode 100644 index 00000000..bf0d9531 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/session.go @@ -0,0 +1,115 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +const ( + SESSION_COOKIE_TOKEN = "MMAUTHTOKEN" + SESSION_CACHE_SIZE = 10000 + SESSION_PROP_PLATFORM = "platform" + SESSION_PROP_OS = "os" + SESSION_PROP_BROWSER = "browser" +) + +type Session struct { + Id string `json:"id"` + Token string `json:"token"` + CreateAt int64 `json:"create_at"` + ExpiresAt int64 `json:"expires_at"` + LastActivityAt int64 `json:"last_activity_at"` + UserId string `json:"user_id"` + TeamId string `json:"team_id"` + DeviceId string `json:"device_id"` + Roles string `json:"roles"` + IsOAuth bool `json:"is_oauth"` + Props StringMap `json:"props"` +} + +func (me *Session) ToJson() string { + b, err := json.Marshal(me) + if err != nil { + return "" + } else { + return string(b) + } +} + +func SessionFromJson(data io.Reader) *Session { + decoder := json.NewDecoder(data) + var me Session + err := decoder.Decode(&me) + if err == nil { + return &me + } else { + return nil + } +} + +func (me *Session) PreSave() { + if me.Id == "" { + me.Id = NewId() + } + + me.Token = NewId() + + me.CreateAt = GetMillis() + me.LastActivityAt = me.CreateAt + + if me.Props == nil { + me.Props = make(map[string]string) + } +} + +func (me *Session) Sanitize() { + me.Token = "" +} + +func (me *Session) IsExpired() bool { + + if me.ExpiresAt <= 0 { + return false + } + + if GetMillis() > me.ExpiresAt { + return true + } + + return false +} + +func (me *Session) SetExpireInDays(days int) { + me.ExpiresAt = GetMillis() + (1000 * 60 * 60 * 24 * int64(days)) +} + +func (me *Session) AddProp(key string, value string) { + + if me.Props == nil { + me.Props = make(map[string]string) + } + + me.Props[key] = value +} + +func SessionsToJson(o []*Session) string { + if b, err := json.Marshal(o); err != nil { + return "[]" + } else { + return string(b) + } +} + +func SessionsFromJson(data io.Reader) []*Session { + decoder := json.NewDecoder(data) + var o []*Session + err := decoder.Decode(&o) + if err == nil { + return o + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/suggest_command.go b/vendor/github.com/mattermost/platform/model/suggest_command.go new file mode 100644 index 00000000..7bc35369 --- /dev/null +++ 
b/vendor/github.com/mattermost/platform/model/suggest_command.go @@ -0,0 +1,34 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +type SuggestCommand struct { + Suggestion string `json:"suggestion"` + Description string `json:"description"` +} + +func (o *SuggestCommand) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func SuggestCommandFromJson(data io.Reader) *SuggestCommand { + decoder := json.NewDecoder(data) + var o SuggestCommand + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/system.go b/vendor/github.com/mattermost/platform/model/system.go new file mode 100644 index 00000000..68d542c1 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/system.go @@ -0,0 +1,42 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "io" +) + +const ( + SYSTEM_DIAGNOSTIC_ID = "DiagnosticId" + SYSTEM_RAN_UNIT_TESTS = "RanUnitTests" + SYSTEM_LAST_SECURITY_TIME = "LastSecurityTime" + SYSTEM_ACTIVE_LICENSE_ID = "ActiveLicenseId" + SYSTEM_LAST_COMPLIANCE_TIME = "LastComplianceTime" +) + +type System struct { + Name string `json:"name"` + Value string `json:"value"` +} + +func (o *System) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func SystemFromJson(data io.Reader) *System { + decoder := json.NewDecoder(data) + var o System + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} diff --git a/vendor/github.com/mattermost/platform/model/team.go b/vendor/github.com/mattermost/platform/model/team.go new file mode 100644 index 00000000..d95dea11 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/team.go @@ -0,0 +1,243 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
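team.go, which follows, pairs each validation rule in Team.IsValid with a normalizer, CleanTeamName, that coerces arbitrary display names into valid URL slugs. A hedged sketch of the normalizer; the expected output assumes the input clears the reserved-name list and IsValidAlphaNum check defined in utils.go:

package main

import (
	"fmt"

	"github.com/mattermost/platform/model"
)

func main() {
	// Lowercases, maps spaces to dashes, strips reserved prefixes and any
	// character outside [a-z0-9-]; names still invalid afterwards are
	// replaced with a fresh random id.
	fmt.Println(model.CleanTeamName("My Team!")) // expected: "my-team"
}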
+ +package model + +import ( + "encoding/json" + "fmt" + "io" + "regexp" + "strings" + "unicode/utf8" +) + +const ( + TEAM_OPEN = "O" + TEAM_INVITE = "I" +) + +type Team struct { + Id string `json:"id"` + CreateAt int64 `json:"create_at"` + UpdateAt int64 `json:"update_at"` + DeleteAt int64 `json:"delete_at"` + DisplayName string `json:"display_name"` + Name string `json:"name"` + Email string `json:"email"` + Type string `json:"type"` + CompanyName string `json:"company_name"` + AllowedDomains string `json:"allowed_domains"` + InviteId string `json:"invite_id"` + AllowOpenInvite bool `json:"allow_open_invite"` + AllowTeamListing bool `json:"allow_team_listing"` +} + +type Invites struct { + Invites []map[string]string `json:"invites"` +} + +func InvitesFromJson(data io.Reader) *Invites { + decoder := json.NewDecoder(data) + var o Invites + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} + +func (o *Invites) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func (o *Team) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} + +func TeamFromJson(data io.Reader) *Team { + decoder := json.NewDecoder(data) + var o Team + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + return nil + } +} + +func TeamMapToJson(u map[string]*Team) string { + b, err := json.Marshal(u) + if err != nil { + return "" + } else { + return string(b) + } +} + +func TeamMapFromJson(data io.Reader) map[string]*Team { + decoder := json.NewDecoder(data) + var teams map[string]*Team + err := decoder.Decode(&teams) + if err == nil { + return teams + } else { + return nil + } +} + +func (o *Team) Etag() string { + return Etag(o.Id, o.UpdateAt) +} + +func (o *Team) IsValid(restrictTeamNames bool) *AppError { + + if len(o.Id) != 26 { + return NewLocAppError("Team.IsValid", "model.team.is_valid.id.app_error", nil, "") + } + + if o.CreateAt == 0 { + return NewLocAppError("Team.IsValid", "model.team.is_valid.create_at.app_error", nil, "id="+o.Id) + } + + if o.UpdateAt == 0 { + return NewLocAppError("Team.IsValid", "model.team.is_valid.update_at.app_error", nil, "id="+o.Id) + } + + if len(o.Email) > 128 { + return NewLocAppError("Team.IsValid", "model.team.is_valid.email.app_error", nil, "id="+o.Id) + } + + if len(o.Email) > 0 && !IsValidEmail(o.Email) { + return NewLocAppError("Team.IsValid", "model.team.is_valid.email.app_error", nil, "id="+o.Id) + } + + if utf8.RuneCountInString(o.DisplayName) == 0 || utf8.RuneCountInString(o.DisplayName) > 64 { + return NewLocAppError("Team.IsValid", "model.team.is_valid.name.app_error", nil, "id="+o.Id) + } + + if len(o.Name) > 64 { + return NewLocAppError("Team.IsValid", "model.team.is_valid.url.app_error", nil, "id="+o.Id) + } + + if restrictTeamNames && IsReservedTeamName(o.Name) { + return NewLocAppError("Team.IsValid", "model.team.is_valid.reserved.app_error", nil, "id="+o.Id) + } + + if !IsValidTeamName(o.Name) { + return NewLocAppError("Team.IsValid", "model.team.is_valid.characters.app_error", nil, "id="+o.Id) + } + + if !(o.Type == TEAM_OPEN || o.Type == TEAM_INVITE) { + return NewLocAppError("Team.IsValid", "model.team.is_valid.type.app_error", nil, "id="+o.Id) + } + + if len(o.CompanyName) > 64 { + return NewLocAppError("Team.IsValid", "model.team.is_valid.company.app_error", nil, "id="+o.Id) + } + + if len(o.AllowedDomains) > 500 { + return NewLocAppError("Team.IsValid", 
"model.team.is_valid.domains.app_error", nil, "id="+o.Id) + } + + return nil +} + +func (o *Team) PreSave() { + if o.Id == "" { + o.Id = NewId() + } + + o.CreateAt = GetMillis() + o.UpdateAt = o.CreateAt + + if len(o.InviteId) == 0 { + o.InviteId = NewId() + } +} + +func (o *Team) PreUpdate() { + o.UpdateAt = GetMillis() +} + +func IsReservedTeamName(s string) bool { + s = strings.ToLower(s) + + for _, value := range reservedName { + if strings.Index(s, value) == 0 { + return true + } + } + + return false +} + +func IsValidTeamName(s string) bool { + + if !IsValidAlphaNum(s, false) { + return false + } + + if len(s) <= 3 { + return false + } + + return true +} + +var validTeamNameCharacter = regexp.MustCompile(`^[a-z0-9-]$`) + +func CleanTeamName(s string) string { + s = strings.ToLower(strings.Replace(s, " ", "-", -1)) + + for _, value := range reservedName { + if strings.Index(s, value) == 0 { + s = strings.Replace(s, value, "", -1) + } + } + + s = strings.TrimSpace(s) + + for _, c := range s { + char := fmt.Sprintf("%c", c) + if !validTeamNameCharacter.MatchString(char) { + s = strings.Replace(s, char, "", -1) + } + } + + s = strings.Trim(s, "-") + + if !IsValidTeamName(s) { + s = NewId() + } + + return s +} + +func (o *Team) PreExport() { +} + +func (o *Team) Sanitize() { + o.Email = "" + o.AllowedDomains = "" +} + +func (o *Team) SanitizeForNotLoggedIn() { + o.Email = "" + o.AllowedDomains = "" + o.CompanyName = "" + if !o.AllowOpenInvite { + o.InviteId = "" + } +} diff --git a/vendor/github.com/mattermost/platform/model/team_signup.go b/vendor/github.com/mattermost/platform/model/team_signup.go new file mode 100644 index 00000000..e3642044 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/team_signup.go @@ -0,0 +1,40 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package model + +import ( + "encoding/json" + "fmt" + "io" +) + +type TeamSignup struct { + Team Team `json:"team"` + User User `json:"user"` + Invites []string `json:"invites"` + Data string `json:"data"` + Hash string `json:"hash"` +} + +func TeamSignupFromJson(data io.Reader) *TeamSignup { + decoder := json.NewDecoder(data) + var o TeamSignup + err := decoder.Decode(&o) + if err == nil { + return &o + } else { + fmt.Println(err) + + return nil + } +} + +func (o *TeamSignup) ToJson() string { + b, err := json.Marshal(o) + if err != nil { + return "" + } else { + return string(b) + } +} diff --git a/vendor/github.com/mattermost/platform/model/user.go b/vendor/github.com/mattermost/platform/model/user.go new file mode 100644 index 00000000..173fe2b4 --- /dev/null +++ b/vendor/github.com/mattermost/platform/model/user.go @@ -0,0 +1,481 @@ +// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package model + +import ( + "encoding/json" + "fmt" + "io" + "regexp" + "strings" + "unicode/utf8" + + "golang.org/x/crypto/bcrypt" +) + +const ( + ROLE_TEAM_ADMIN = "admin" + ROLE_SYSTEM_ADMIN = "system_admin" + USER_AWAY_TIMEOUT = 5 * 60 * 1000 // 5 minutes + USER_OFFLINE_TIMEOUT = 1 * 60 * 1000 // 1 minute + USER_OFFLINE = "offline" + USER_AWAY = "away" + USER_ONLINE = "online" + USER_NOTIFY_ALL = "all" + USER_NOTIFY_MENTION = "mention" + USER_NOTIFY_NONE = "none" + DEFAULT_LOCALE = "en" + USER_AUTH_SERVICE_EMAIL = "email" + USER_AUTH_SERVICE_USERNAME = "username" +) + +type User struct { + Id string `json:"id"` + CreateAt int64 `json:"create_at,omitempty"` + UpdateAt int64 `json:"update_at,omitempty"` + DeleteAt int64 `json:"delete_at"` + TeamId string `json:"team_id"` + Username string `json:"username"` + Password string `json:"password,omitempty"` + AuthData string `json:"auth_data,omitempty"` + AuthService string `json:"auth_service"` + Email string `json:"email"` + EmailVerified bool `json:"email_verified,omitempty"` + Nickname string `json:"nickname"` + FirstName string `json:"first_name"` + LastName string `json:"last_name"` + Roles string `json:"roles"` + LastActivityAt int64 `json:"last_activity_at,omitempty"` + LastPingAt int64 `json:"last_ping_at,omitempty"` + AllowMarketing bool `json:"allow_marketing,omitempty"` + Props StringMap `json:"props,omitempty"` + NotifyProps StringMap `json:"notify_props,omitempty"` + ThemeProps StringMap `json:"theme_props,omitempty"` + LastPasswordUpdate int64 `json:"last_password_update,omitempty"` + LastPictureUpdate int64 `json:"last_picture_update,omitempty"` + FailedAttempts int `json:"failed_attempts,omitempty"` + Locale string `json:"locale"` + MfaActive bool `json:"mfa_active,omitempty"` + MfaSecret string `json:"mfa_secret,omitempty"` +} + +// IsValid validates the user and returns an error if it isn't configured +// correctly. 
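+//
+// A hypothetical usage sketch (not part of the upstream file; the field values
+// are made up): PreSave fills the generated fields that IsValid checks, so a
+// freshly constructed user can be validated as
+//
+//	user := &User{TeamId: NewId(), Username: "bob", Email: "bob@example.com"}
+//	user.PreSave()
+//	if err := user.IsValid(); err != nil {
+//		fmt.Println(err.Error())
+//	}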
+func (u *User) IsValid() *AppError { + + if len(u.Id) != 26 { + return NewLocAppError("User.IsValid", "model.user.is_valid.id.app_error", nil, "") + } + + if u.CreateAt == 0 { + return NewLocAppError("User.IsValid", "model.user.is_valid.create_at.app_error", nil, "user_id="+u.Id) + } + + if u.UpdateAt == 0 { + return NewLocAppError("User.IsValid", "model.user.is_valid.update_at.app_error", nil, "user_id="+u.Id) + } + + if len(u.TeamId) != 26 { + return NewLocAppError("User.IsValid", "model.user.is_valid.team_id.app_error", nil, "") + } + + if !IsValidUsername(u.Username) { + return NewLocAppError("User.IsValid", "model.user.is_valid.username.app_error", nil, "user_id="+u.Id) + } + + if len(u.Email) > 128 || len(u.Email) == 0 { + return NewLocAppError("User.IsValid", "model.user.is_valid.email.app_error", nil, "user_id="+u.Id) + } + + if utf8.RuneCountInString(u.Nickname) > 64 { + return NewLocAppError("User.IsValid", "model.user.is_valid.nickname.app_error", nil, "user_id="+u.Id) + } + + if utf8.RuneCountInString(u.FirstName) > 64 { + return NewLocAppError("User.IsValid", "model.user.is_valid.first_name.app_error", nil, "user_id="+u.Id) + } + + if utf8.RuneCountInString(u.LastName) > 64 { + return NewLocAppError("User.IsValid", "model.user.is_valid.last_name.app_error", nil, "user_id="+u.Id) + } + + if len(u.Password) > 128 { + return NewLocAppError("User.IsValid", "model.user.is_valid.pwd.app_error", nil, "user_id="+u.Id) + } + + if len(u.AuthData) > 128 { + return NewLocAppError("User.IsValid", "model.user.is_valid.auth_data.app_error", nil, "user_id="+u.Id) + } + + if len(u.AuthData) > 0 && len(u.AuthService) == 0 { + return NewLocAppError("User.IsValid", "model.user.is_valid.auth_data_type.app_error", nil, "user_id="+u.Id) + } + + if len(u.Password) > 0 && len(u.AuthData) > 0 { + return NewLocAppError("User.IsValid", "model.user.is_valid.auth_data_pwd.app_error", nil, "user_id="+u.Id) + } + + if len(u.ThemeProps) > 2000 { + return NewLocAppError("User.IsValid", "model.user.is_valid.theme.app_error", nil, "user_id="+u.Id) + } + + return nil +} + +// PreSave will set the Id and Username if missing. It will also fill +// in the CreateAt, UpdateAt times. It will also hash the password. It should +// be run before saving the user to the db. +func (u *User) PreSave() { + if u.Id == "" { + u.Id = NewId() + } + + if u.Username == "" { + u.Username = NewId() + } + + u.Username = strings.ToLower(u.Username) + u.Email = strings.ToLower(u.Email) + u.Locale = strings.ToLower(u.Locale) + + u.CreateAt = GetMillis() + u.UpdateAt = u.CreateAt + + u.LastPasswordUpdate = u.CreateAt + + u.MfaActive = false + + if u.Locale == "" { + u.Locale = DEFAULT_LOCALE + } + + if u.Props == nil { + u.Props = make(map[string]string) + } + + if u.NotifyProps == nil || len(u.NotifyProps) == 0 { + u.SetDefaultNotifications() + } + + if len(u.Password) > 0 { + u.Password = HashPassword(u.Password) + } +} + +// PreUpdate should be run before updating the user in the db. 
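+// It lower-cases the username, email, and locale, refreshes UpdateAt, and
+// prunes blank entries from the "mention_keys" notify prop.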
+func (u *User) PreUpdate() {
+	u.Username = strings.ToLower(u.Username)
+	u.Email = strings.ToLower(u.Email)
+	u.Locale = strings.ToLower(u.Locale)
+	u.UpdateAt = GetMillis()
+
+	if u.NotifyProps == nil || len(u.NotifyProps) == 0 {
+		u.SetDefaultNotifications()
+	} else if _, ok := u.NotifyProps["mention_keys"]; ok {
+		// Remove any blank mention keys
+		splitKeys := strings.Split(u.NotifyProps["mention_keys"], ",")
+		goodKeys := []string{}
+		for _, key := range splitKeys {
+			if len(key) > 0 {
+				goodKeys = append(goodKeys, strings.ToLower(key))
+			}
+		}
+		u.NotifyProps["mention_keys"] = strings.Join(goodKeys, ",")
+	}
+}
+
+func (u *User) SetDefaultNotifications() {
+	u.NotifyProps = make(map[string]string)
+	u.NotifyProps["email"] = "true"
+	u.NotifyProps["desktop"] = USER_NOTIFY_ALL
+	u.NotifyProps["desktop_sound"] = "true"
+	u.NotifyProps["mention_keys"] = u.Username + ",@" + u.Username
+	u.NotifyProps["first_name"] = "false"
+	u.NotifyProps["all"] = "true"
+	u.NotifyProps["channel"] = "true"
+	splitName := strings.Split(u.Nickname, " ")
+	if len(splitName) > 0 && splitName[0] != "" {
+		u.NotifyProps["first_name"] = "true"
+		u.NotifyProps["mention_keys"] += "," + splitName[0]
+	}
+}
+
+// ToJson converts a User to a JSON string
+func (u *User) ToJson() string {
+	b, err := json.Marshal(u)
+	if err != nil {
+		return ""
+	} else {
+		return string(b)
+	}
+}
+
+// Etag generates a valid strong etag so the browser can cache the results
+func (u *User) Etag() string {
+	return Etag(u.Id, u.UpdateAt)
+}
+
+func (u *User) IsOffline() bool {
+	return (GetMillis()-u.LastPingAt) > USER_OFFLINE_TIMEOUT && (GetMillis()-u.LastActivityAt) > USER_OFFLINE_TIMEOUT
+}
+
+func (u *User) IsAway() bool {
+	return (GetMillis() - u.LastActivityAt) > USER_AWAY_TIMEOUT
+}
+
+// Sanitize removes any private data from the user object
+func (u *User) Sanitize(options map[string]bool) {
+	u.Password = ""
+	u.AuthData = ""
+
+	if len(options) != 0 && !options["email"] {
+		u.Email = ""
+	}
+	if len(options) != 0 && !options["fullname"] {
+		u.FirstName = ""
+		u.LastName = ""
+	}
+	if len(options) != 0 && !options["passwordupdate"] {
+		u.LastPasswordUpdate = 0
+	}
+}
+
+func (u *User) ClearNonProfileFields() {
+	u.UpdateAt = 0
+	u.Password = ""
+	u.AuthData = ""
+	u.AuthService = ""
+	u.EmailVerified = false
+	u.LastPingAt = 0
+	u.AllowMarketing = false
+	u.Props = StringMap{}
+	u.NotifyProps = StringMap{}
+	u.ThemeProps = StringMap{}
+	u.LastPasswordUpdate = 0
+	u.LastPictureUpdate = 0
+	u.FailedAttempts = 0
+}
+
+func (u *User) MakeNonNil() {
+	if u.Props == nil {
+		u.Props = make(map[string]string)
+	}
+
+	if u.NotifyProps == nil {
+		u.NotifyProps = make(map[string]string)
+	}
+}
+
+func (u *User) AddProp(key string, value string) {
+	u.MakeNonNil()
+
+	u.Props[key] = value
+}
+
+func (u *User) AddNotifyProp(key string, value string) {
+	u.MakeNonNil()
+
+	u.NotifyProps[key] = value
+}
+
+func (u *User) GetFullName() string {
+	if u.FirstName != "" && u.LastName != "" {
+		return u.FirstName + " " + u.LastName
+	} else if u.FirstName != "" {
+		return u.FirstName
+	} else if u.LastName != "" {
+		return u.LastName
+	} else {
+		return ""
+	}
+}
+
+func (u *User) GetDisplayName() string {
+	if u.Nickname != "" {
+		return u.Nickname
+	} else if fullName := u.GetFullName(); fullName != "" {
+		return fullName
+	} else {
+		return u.Username
+	}
+}
+
+func IsValidRoles(userRoles string) bool {
+
+	roles := strings.Split(userRoles, " ")
+
+	for _, r := range roles {
+		if !isValidRole(r) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func isValidRole(role string) bool {
+	if role == "" {
+		return true
+	}
+
+	if role == ROLE_TEAM_ADMIN {
+		return true
+	}
+
+	if role == ROLE_SYSTEM_ADMIN {
+		return true
+	}
+
+	return false
+}
+
+// Make sure you actually want to use this function. In context.go there are functions to check permissions.
+// This function should not be used to check permissions.
+func (u *User) IsInRole(inRole string) bool {
+	return IsInRole(u.Roles, inRole)
+}
+
+// Make sure you actually want to use this function. In context.go there are functions to check permissions.
+// This function should not be used to check permissions.
+func IsInRole(userRoles string, inRole string) bool {
+	roles := strings.Split(userRoles, " ")
+
+	for _, r := range roles {
+		if r == inRole {
+			return true
+		}
+
+	}
+
+	return false
+}
+
+func (u *User) IsSSOUser() bool {
+	if len(u.AuthData) != 0 && len(u.AuthService) != 0 && u.AuthService != USER_AUTH_SERVICE_LDAP {
+		return true
+	}
+	return false
+}
+
+func (u *User) IsLDAPUser() bool {
+	if u.AuthService == USER_AUTH_SERVICE_LDAP {
+		return true
+	}
+	return false
+}
+
+func (u *User) PreExport() {
+	u.Password = ""
+	u.AuthData = ""
+	u.LastActivityAt = 0
+	u.LastPingAt = 0
+	u.LastPasswordUpdate = 0
+	u.LastPictureUpdate = 0
+	u.FailedAttempts = 0
+}
+
+// UserFromJson will decode the input and return a User
+func UserFromJson(data io.Reader) *User {
+	decoder := json.NewDecoder(data)
+	var user User
+	err := decoder.Decode(&user)
+	if err == nil {
+		return &user
+	} else {
+		return nil
+	}
+}
+
+func UserMapToJson(u map[string]*User) string {
+	b, err := json.Marshal(u)
+	if err != nil {
+		return ""
+	} else {
+		return string(b)
+	}
+}
+
+func UserMapFromJson(data io.Reader) map[string]*User {
+	decoder := json.NewDecoder(data)
+	var users map[string]*User
+	err := decoder.Decode(&users)
+	if err == nil {
+		return users
+	} else {
+		return nil
+	}
+}
+
+// HashPassword generates a hash using bcrypt.GenerateFromPassword
+func HashPassword(password string) string {
+	hash, err := bcrypt.GenerateFromPassword([]byte(password), 10)
+	if err != nil {
+		panic(err)
+	}
+
+	return string(hash)
+}
+
+// ComparePassword compares a bcrypt hash against the plaintext password
+func ComparePassword(hash string, password string) bool {
+
+	if len(password) == 0 {
+		return false
+	}
+
+	err := bcrypt.CompareHashAndPassword([]byte(hash), []byte(password))
+	return err == nil
+}
+
+var validUsernameChars = regexp.MustCompile(`^[a-z0-9\.\-_]+$`)
+
+var restrictedUsernames = []string{
+	"all",
+	"channel",
+}
+
+func IsValidUsername(s string) bool {
+	if len(s) == 0 || len(s) > 64 {
+		return false
+	}
+
+	if !validUsernameChars.MatchString(s) {
+		return false
+	}
+
+	for _, restrictedUsername := range restrictedUsernames {
+		if s == restrictedUsername {
+			return false
+		}
+	}
+
+	return true
+}
+
+func CleanUsername(s string) string {
+	s = strings.ToLower(strings.Replace(s, " ", "-", -1))
+
+	for _, value := range reservedName {
+		if s == value {
+			s = strings.Replace(s, value, "", -1)
+		}
+	}
+
+	s = strings.TrimSpace(s)
+
+	for _, c := range s {
+		char := fmt.Sprintf("%c", c)
+		if !validUsernameChars.MatchString(char) {
+			s = strings.Replace(s, char, "-", -1)
+		}
+	}
+
+	s = strings.Trim(s, "-")
+
+	if !IsValidUsername(s) {
+		s = "a" + NewId()
+	}
+
+	return s
+}
diff --git a/vendor/github.com/mattermost/platform/model/utils.go b/vendor/github.com/mattermost/platform/model/utils.go
new file mode 100644
index 00000000..1ce41bb3
--- /dev/null
+++ b/vendor/github.com/mattermost/platform/model/utils.go
@@ -0,0 +1,381 @@
+// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/base32"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/mail"
+	"net/url"
+	"regexp"
+	"strings"
+	"time"
+
+	goi18n "github.com/nicksnyder/go-i18n/i18n"
+	"github.com/pborman/uuid"
+)
+
+type StringInterface map[string]interface{}
+type StringMap map[string]string
+type StringArray []string
+type EncryptStringMap map[string]string
+
+type AppError struct {
+	Id            string `json:"id"`
+	Message       string `json:"message"`        // Message to be displayed to the end user without debugging information
+	DetailedError string `json:"detailed_error"` // Internal error string to help the developer
+	RequestId     string `json:"request_id"`     // The RequestId that's also set in the header
+	StatusCode    int    `json:"status_code"`    // The http status code
+	Where         string `json:"-"`              // The function where it happened in the form of Struct.Func
+	IsOAuth       bool   `json:"is_oauth"`       // Whether the error is OAuth specific
+	params        map[string]interface{} `json:"-"`
+}
+
+func (er *AppError) Error() string {
+	return er.Where + ": " + er.Message + ", " + er.DetailedError
+}
+
+func (er *AppError) Translate(T goi18n.TranslateFunc) {
+	if len(er.Message) == 0 {
+		if er.params == nil {
+			er.Message = T(er.Id)
+		} else {
+			er.Message = T(er.Id, er.params)
+		}
+	}
+}
+
+func (er *AppError) ToJson() string {
+	b, err := json.Marshal(er)
+	if err != nil {
+		return ""
+	} else {
+		return string(b)
+	}
+}
+
+// AppErrorFromJson will decode the input and return an AppError
+func AppErrorFromJson(data io.Reader) *AppError {
+	decoder := json.NewDecoder(data)
+	var er AppError
+	err := decoder.Decode(&er)
+	if err == nil {
+		return &er
+	} else {
+		return NewLocAppError("AppErrorFromJson", "model.utils.decode_json.app_error", nil, err.Error())
+	}
+}
+
+func NewLocAppError(where string, id string, params map[string]interface{}, details string) *AppError {
+	ap := &AppError{}
+	ap.Id = id
+	ap.params = params
+	ap.Where = where
+	ap.DetailedError = details
+	ap.StatusCode = 500
+	ap.IsOAuth = false
+	return ap
+}
+
+var encoding = base32.NewEncoding("ybndrfg8ejkmcpqxot1uwisza345h769")
+
+// NewId returns a globally unique identifier: a 26-character [a-z0-9] string.
+// It is a version 4 UUID that is z-base-32 encoded with the padding
+// stripped off.
+func NewId() string {
+	var b bytes.Buffer
+	encoder := base32.NewEncoder(encoding, &b)
+	encoder.Write(uuid.NewRandom())
+	encoder.Close()
+	b.Truncate(26) // removes the '==' padding
+	return b.String()
+}
+
+func NewRandomString(length int) string {
+	var b bytes.Buffer
+	str := make([]byte, length+8)
+	rand.Read(str)
+	encoder := base32.NewEncoder(encoding, &b)
+	encoder.Write(str)
+	encoder.Close()
+	b.Truncate(length) // removes the '==' padding
+	return b.String()
+}
+
+// GetMillis is a convenience function to get milliseconds since epoch.
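+//
+// A sketch of the round trip (all timestamps in this package, e.g. CreateAt
+// and UpdateAt, are milliseconds since the epoch):
+//
+//	ms := GetMillis()
+//	t := time.Unix(0, ms*int64(time.Millisecond)) // back to a time.Time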
+func GetMillis() int64 { + return time.Now().UnixNano() / int64(time.Millisecond) +} + +// MapToJson converts a map to a json string +func MapToJson(objmap map[string]string) string { + if b, err := json.Marshal(objmap); err != nil { + return "" + } else { + return string(b) + } +} + +// MapFromJson will decode the key/value pair map +func MapFromJson(data io.Reader) map[string]string { + decoder := json.NewDecoder(data) + + var objmap map[string]string + if err := decoder.Decode(&objmap); err != nil { + return make(map[string]string) + } else { + return objmap + } +} + +func ArrayToJson(objmap []string) string { + if b, err := json.Marshal(objmap); err != nil { + return "" + } else { + return string(b) + } +} + +func ArrayFromJson(data io.Reader) []string { + decoder := json.NewDecoder(data) + + var objmap []string + if err := decoder.Decode(&objmap); err != nil { + return make([]string, 0) + } else { + return objmap + } +} + +func StringInterfaceToJson(objmap map[string]interface{}) string { + if b, err := json.Marshal(objmap); err != nil { + return "" + } else { + return string(b) + } +} + +func StringInterfaceFromJson(data io.Reader) map[string]interface{} { + decoder := json.NewDecoder(data) + + var objmap map[string]interface{} + if err := decoder.Decode(&objmap); err != nil { + return make(map[string]interface{}) + } else { + return objmap + } +} + +func IsLower(s string) bool { + if strings.ToLower(s) == s { + return true + } + + return false +} + +func IsValidEmail(email string) bool { + + if !IsLower(email) { + return false + } + + if _, err := mail.ParseAddress(email); err == nil { + return true + } + + return false +} + +var reservedName = []string{ + "www", + "web", + "admin", + "support", + "notify", + "test", + "demo", + "mail", + "team", + "channel", + "internal", + "localhost", + "dockerhost", + "stag", + "post", + "cluster", + "api", + "oauth", +} + +var wwwStart = regexp.MustCompile(`^www`) +var betaStart = regexp.MustCompile(`^beta`) +var ciStart = regexp.MustCompile(`^ci`) + +func GetSubDomain(s string) (string, string) { + s = strings.Replace(s, "http://", "", 1) + s = strings.Replace(s, "https://", "", 1) + + match := wwwStart.MatchString(s) + if match { + return "", "" + } + + match = betaStart.MatchString(s) + if match { + return "", "" + } + + match = ciStart.MatchString(s) + if match { + return "", "" + } + + parts := strings.Split(s, ".") + + if len(parts) != 3 { + return "", "" + } + + return parts[0], parts[1] +} + +func IsValidChannelIdentifier(s string) bool { + + if !IsValidAlphaNum(s, true) { + return false + } + + if len(s) < 2 { + return false + } + + return true +} + +var validAlphaNumUnderscore = regexp.MustCompile(`^[a-z0-9]+([a-z\-\_0-9]+|(__)?)[a-z0-9]+$`) +var validAlphaNum = regexp.MustCompile(`^[a-z0-9]+([a-z\-0-9]+|(__)?)[a-z0-9]+$`) + +func IsValidAlphaNum(s string, allowUnderscores bool) bool { + var match bool + if allowUnderscores { + match = validAlphaNumUnderscore.MatchString(s) + } else { + match = validAlphaNum.MatchString(s) + } + + if !match { + return false + } + + return true +} + +func Etag(parts ...interface{}) string { + + etag := CurrentVersion + + for _, part := range parts { + etag += fmt.Sprintf(".%v", part) + } + + return etag +} + +var validHashtag = regexp.MustCompile(`^(#[A-Za-zäöüÄÖÜß]+[A-Za-z0-9äöüÄÖÜß_\-]*[A-Za-z0-9äöüÄÖÜß])$`) +var puncStart = regexp.MustCompile(`^[.,()&$!\?\[\]{}':;\\<>\-+=%^*|]+`) +var hashtagStart = regexp.MustCompile(`^#{2,}`) +var puncEnd = 
regexp.MustCompile(`[.,()&$#!\?\[\]{}':;\\<>\-+=%^*|]+$`)
+var puncEndWildcard = regexp.MustCompile(`[.,()&$#!\?\[\]{}':;\\<>\-+=%^|]+$`)
+
+func ParseHashtags(text string) (string, string) {
+	words := strings.Fields(text)
+
+	hashtagString := ""
+	plainString := ""
+	for _, word := range words {
+		// trim off surrounding punctuation
+		word = puncStart.ReplaceAllString(word, "")
+		word = puncEnd.ReplaceAllString(word, "")
+
+		// and remove extra pound #s
+		word = hashtagStart.ReplaceAllString(word, "#")
+
+		if validHashtag.MatchString(word) {
+			hashtagString += " " + word
+		} else {
+			plainString += " " + word
+		}
+	}
+
+	if len(hashtagString) > 1000 {
+		hashtagString = hashtagString[:999]
+		lastSpace := strings.LastIndex(hashtagString, " ")
+		if lastSpace > -1 {
+			hashtagString = hashtagString[:lastSpace]
+		} else {
+			hashtagString = ""
+		}
+	}
+
+	return strings.TrimSpace(hashtagString), strings.TrimSpace(plainString)
+}
+
+func IsFileExtImage(ext string) bool {
+	ext = strings.ToLower(ext)
+	for _, imgExt := range IMAGE_EXTENSIONS {
+		if ext == imgExt {
+			return true
+		}
+	}
+	return false
+}
+
+func GetImageMimeType(ext string) string {
+	ext = strings.ToLower(ext)
+	if len(IMAGE_MIME_TYPES[ext]) == 0 {
+		return "image"
+	} else {
+		return IMAGE_MIME_TYPES[ext]
+	}
+}
+
+func ClearMentionTags(post string) string {
+	post = strings.Replace(post, "<mention>", "", -1)
+	post = strings.Replace(post, "</mention>", "", -1)
+	return post
+}
+
+var UrlRegex = regexp.MustCompile(`^((?:[a-z]+:\/\/)?(?:(?:[a-z0-9\-]+\.)+(?:[a-z]{2}|aero|arpa|biz|com|coop|edu|gov|info|int|jobs|mil|museum|name|nato|net|org|pro|travel|local|internal))(:[0-9]{1,5})?(?:\/[a-z0-9_\-\.~]+)*(\/([a-z0-9_\-\.]*)(?:\?[a-z0-9+_~\-\.%=&]*)?)?(?:#[a-zA-Z0-9!$&'()*+.=-_~:@/?]*)?)(?:\s+|$)$`)
+var PartialUrlRegex = regexp.MustCompile(`/([A-Za-z0-9]{26})/([A-Za-z0-9]{26})/((?:[A-Za-z0-9]{26})?.+(?:\.[A-Za-z0-9]{3,})?)`)
+
+var SplitRunes = map[rune]bool{',': true, ' ': true, '.': true, '!': true, '?': true, ':': true, ';': true, '\n': true, '<': true, '>': true, '(': true, ')': true, '{': true, '}': true, '[': true, ']': true, '+': true, '/': true, '\\': true}
+
+func IsValidHttpUrl(rawUrl string) bool {
+	if strings.Index(rawUrl, "http://") != 0 && strings.Index(rawUrl, "https://") != 0 {
+		return false
+	}
+
+	if _, err := url.ParseRequestURI(rawUrl); err != nil {
+		return false
+	}
+
+	return true
+}
+
+func IsValidHttpsUrl(rawUrl string) bool {
+	if strings.Index(rawUrl, "https://") != 0 {
+		return false
+	}
+
+	if _, err := url.ParseRequestURI(rawUrl); err != nil {
+		return false
+	}
+
+	return true
+}
diff --git a/vendor/github.com/mattermost/platform/model/version.go b/vendor/github.com/mattermost/platform/model/version.go
new file mode 100644
index 00000000..b3950dcc
--- /dev/null
+++ b/vendor/github.com/mattermost/platform/model/version.go
@@ -0,0 +1,127 @@
+// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved.
+// See License.txt for license information.
+
+package model
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// This is a list of all the current versions including any patches.
+// It should be maintained in chronological order with most current
+// release at the front of the list.
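+// For example, given the list below, GetPreviousVersion("2.1.0") walks the
+// hot-fix-stripped list and returns "2.0.0".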
+var versions = []string{ + "2.1.0", + "2.0.0", + "1.4.0", + "1.3.0", + "1.2.1", + "1.2.0", + "1.1.0", + "1.0.0", + "0.7.1", + "0.7.0", + "0.6.0", + "0.5.0", +} + +var CurrentVersion string = versions[0] +var BuildNumber string +var BuildDate string +var BuildHash string +var BuildEnterpriseReady string +var versionsWithoutHotFixes []string + +func init() { + versionsWithoutHotFixes = make([]string, 0, len(versions)) + seen := make(map[string]string) + + for _, version := range versions { + maj, min, _ := SplitVersion(version) + verStr := fmt.Sprintf("%v.%v.0", maj, min) + + if seen[verStr] == "" { + versionsWithoutHotFixes = append(versionsWithoutHotFixes, verStr) + seen[verStr] = verStr + } + } +} + +func SplitVersion(version string) (int64, int64, int64) { + parts := strings.Split(version, ".") + + major := int64(0) + minor := int64(0) + patch := int64(0) + + if len(parts) > 0 { + major, _ = strconv.ParseInt(parts[0], 10, 64) + } + + if len(parts) > 1 { + minor, _ = strconv.ParseInt(parts[1], 10, 64) + } + + if len(parts) > 2 { + patch, _ = strconv.ParseInt(parts[2], 10, 64) + } + + return major, minor, patch +} + +func GetPreviousVersion(version string) string { + verMajor, verMinor, _ := SplitVersion(version) + verStr := fmt.Sprintf("%v.%v.0", verMajor, verMinor) + + for index, v := range versionsWithoutHotFixes { + if v == verStr && len(versionsWithoutHotFixes) > index+1 { + return versionsWithoutHotFixes[index+1] + } + } + + return "" +} + +func IsOfficalBuild() bool { + return BuildNumber != "_BUILD_NUMBER_" +} + +func IsCurrentVersion(versionToCheck string) bool { + currentMajor, currentMinor, _ := SplitVersion(CurrentVersion) + toCheckMajor, toCheckMinor, _ := SplitVersion(versionToCheck) + + if toCheckMajor == currentMajor && toCheckMinor == currentMinor { + return true + } else { + return false + } +} + +func IsPreviousVersionsSupported(versionToCheck string) bool { + toCheckMajor, toCheckMinor, _ := SplitVersion(versionToCheck) + versionToCheckStr := fmt.Sprintf("%v.%v.0", toCheckMajor, toCheckMinor) + + // Current Supported + if versionsWithoutHotFixes[0] == versionToCheckStr { + return true + } + + // Current - 1 Supported + if versionsWithoutHotFixes[1] == versionToCheckStr { + return true + } + + // Current - 2 Supported + if versionsWithoutHotFixes[2] == versionToCheckStr { + return true + } + + // Current - 3 Supported + if versionsWithoutHotFixes[3] == versionToCheckStr { + return true + } + + return false +} diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/LICENSE b/vendor/github.com/nicksnyder/go-i18n/i18n/LICENSE new file mode 100644 index 00000000..609cce79 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2014 Nick Snyder https://github.com/nicksnyder + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/bundle/bundle.go b/vendor/github.com/nicksnyder/go-i18n/i18n/bundle/bundle.go new file mode 100644 index 00000000..e93db95d --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/bundle/bundle.go @@ -0,0 +1,315 @@ +// Package bundle manages translations for multiple languages. +package bundle + +import ( + "encoding/json" + "fmt" + "gopkg.in/yaml.v2" + "io/ioutil" + "reflect" + + "path/filepath" + + "github.com/nicksnyder/go-i18n/i18n/language" + "github.com/nicksnyder/go-i18n/i18n/translation" +) + +// TranslateFunc is a copy of i18n.TranslateFunc to avoid a circular dependency. +type TranslateFunc func(translationID string, args ...interface{}) string + +// Bundle stores the translations for multiple languages. +type Bundle struct { + // The primary translations for a language tag and translation id. + translations map[string]map[string]translation.Translation + + // Translations that can be used when an exact language match is not possible. + fallbackTranslations map[string]map[string]translation.Translation +} + +// New returns an empty bundle. +func New() *Bundle { + return &Bundle{ + translations: make(map[string]map[string]translation.Translation), + fallbackTranslations: make(map[string]map[string]translation.Translation), + } +} + +// MustLoadTranslationFile is similar to LoadTranslationFile +// except it panics if an error happens. +func (b *Bundle) MustLoadTranslationFile(filename string) { + if err := b.LoadTranslationFile(filename); err != nil { + panic(err) + } +} + +// LoadTranslationFile loads the translations from filename into memory. +// +// The language that the translations are associated with is parsed from the filename (e.g. en-US.json). +// +// Generally you should load translation files once during your program's initialization. +func (b *Bundle) LoadTranslationFile(filename string) error { + buf, err := ioutil.ReadFile(filename) + if err != nil { + return err + } + return b.ParseTranslationFileBytes(filename, buf) +} + +// ParseTranslationFileBytes is similar to LoadTranslationFile except it parses the bytes in buf. +// +// It is useful for parsing translation files embedded with go-bindata. +func (b *Bundle) ParseTranslationFileBytes(filename string, buf []byte) error { + basename := filepath.Base(filename) + langs := language.Parse(basename) + switch l := len(langs); { + case l == 0: + return fmt.Errorf("no language found in %q", basename) + case l > 1: + return fmt.Errorf("multiple languages found in filename %q: %v; expected one", basename, langs) + } + translations, err := parseTranslations(filename, buf) + if err != nil { + return err + } + b.AddTranslation(langs[0], translations...) 
+	return nil
+}
+
+func parseTranslations(filename string, buf []byte) ([]translation.Translation, error) {
+	var unmarshalFunc func([]byte, interface{}) error
+	switch format := filepath.Ext(filename); format {
+	case ".json":
+		unmarshalFunc = json.Unmarshal
+	case ".yaml":
+		unmarshalFunc = yaml.Unmarshal
+	default:
+		return nil, fmt.Errorf("unsupported file extension %s", format)
+	}
+
+	var translationsData []map[string]interface{}
+	if len(buf) > 0 {
+		if err := unmarshalFunc(buf, &translationsData); err != nil {
+			return nil, err
+		}
+	}
+
+	translations := make([]translation.Translation, 0, len(translationsData))
+	for i, translationData := range translationsData {
+		t, err := translation.NewTranslation(translationData)
+		if err != nil {
+			return nil, fmt.Errorf("unable to parse translation #%d in %s because %s\n%v", i, filename, err, translationData)
+		}
+		translations = append(translations, t)
+	}
+	return translations, nil
+}
+
+// AddTranslation adds translations for a language.
+//
+// It is useful if your translations are in a format not supported by LoadTranslationFile.
+func (b *Bundle) AddTranslation(lang *language.Language, translations ...translation.Translation) {
+	if b.translations[lang.Tag] == nil {
+		b.translations[lang.Tag] = make(map[string]translation.Translation, len(translations))
+	}
+	currentTranslations := b.translations[lang.Tag]
+	for _, newTranslation := range translations {
+		if currentTranslation := currentTranslations[newTranslation.ID()]; currentTranslation != nil {
+			currentTranslations[newTranslation.ID()] = currentTranslation.Merge(newTranslation)
+		} else {
+			currentTranslations[newTranslation.ID()] = newTranslation
+		}
+	}
+
+	// lang can provide translations for less specific language tags.
+	for _, tag := range lang.MatchingTags() {
+		b.fallbackTranslations[tag] = currentTranslations
+	}
+}
+
+// Translations returns all translations in the bundle.
+func (b *Bundle) Translations() map[string]map[string]translation.Translation {
+	return b.translations
+}
+
+// LanguageTags returns the tags of all languages that have been added.
+func (b *Bundle) LanguageTags() []string {
+	var tags []string
+	for k := range b.translations {
+		tags = append(tags, k)
+	}
+	return tags
+}
+
+// LanguageTranslationIDs returns the ids of all translations that have been added for a given language.
+func (b *Bundle) LanguageTranslationIDs(languageTag string) []string {
+	var ids []string
+	for id := range b.translations[languageTag] {
+		ids = append(ids, id)
+	}
+	return ids
+}
+
+// MustTfunc is similar to Tfunc except it panics if an error happens.
+func (b *Bundle) MustTfunc(pref string, prefs ...string) TranslateFunc {
+	tfunc, err := b.Tfunc(pref, prefs...)
+	if err != nil {
+		panic(err)
+	}
+	return tfunc
+}
+
+// MustTfuncAndLanguage is similar to TfuncAndLanguage except it panics if an error happens.
+func (b *Bundle) MustTfuncAndLanguage(pref string, prefs ...string) (TranslateFunc, *language.Language) {
+	tfunc, language, err := b.TfuncAndLanguage(pref, prefs...)
+	if err != nil {
+		panic(err)
+	}
+	return tfunc, language
+}
+
+// Tfunc is similar to TfuncAndLanguage except it doesn't return the Language.
+func (b *Bundle) Tfunc(pref string, prefs ...string) (TranslateFunc, error) {
+	tfunc, _, err := b.TfuncAndLanguage(pref, prefs...)
+	return tfunc, err
+}
+
+// TfuncAndLanguage returns a TranslateFunc for the first Language that
+// has a non-zero number of translations in the bundle.
+//
+// The returned Language matches the first language preference that could be satisfied,
+// but this may not strictly match the language of the translations used to satisfy that preference.
+//
+// For example, the user may request "zh". If there are no translations for "zh" but there are translations
+// for "zh-cn", then the translations for "zh-cn" will be used but the returned Language will be "zh".
+//
+// It can parse languages from Accept-Language headers (RFC 2616),
+// but it assumes weights are monotonically decreasing.
+func (b *Bundle) TfuncAndLanguage(pref string, prefs ...string) (TranslateFunc, *language.Language, error) {
+	lang := b.supportedLanguage(pref, prefs...)
+	var err error
+	if lang == nil {
+		err = fmt.Errorf("no supported languages found %#v", append(prefs, pref))
+	}
+	return func(translationID string, args ...interface{}) string {
+		return b.translate(lang, translationID, args...)
+	}, lang, err
+}
+
+// supportedLanguage returns the first language which
+// has a non-zero number of translations in the bundle.
+func (b *Bundle) supportedLanguage(pref string, prefs ...string) *language.Language {
+	lang := b.translatedLanguage(pref)
+	if lang == nil {
+		for _, pref := range prefs {
+			lang = b.translatedLanguage(pref)
+			if lang != nil {
+				break
+			}
+		}
+	}
+	return lang
+}
+
+func (b *Bundle) translatedLanguage(src string) *language.Language {
+	langs := language.Parse(src)
+	for _, lang := range langs {
+		if len(b.translations[lang.Tag]) > 0 ||
+			len(b.fallbackTranslations[lang.Tag]) > 0 {
+			return lang
+		}
+	}
+	return nil
+}
+
+func (b *Bundle) translate(lang *language.Language, translationID string, args ...interface{}) string {
+	if lang == nil {
+		return translationID
+	}
+
+	translations := b.translations[lang.Tag]
+	if translations == nil {
+		translations = b.fallbackTranslations[lang.Tag]
+		if translations == nil {
+			return translationID
+		}
+	}
+
+	translation := translations[translationID]
+	if translation == nil {
+		return translationID
+	}
+
+	var data interface{}
+	var count interface{}
+	if argc := len(args); argc > 0 {
+		if isNumber(args[0]) {
+			count = args[0]
+			if argc > 1 {
+				data = args[1]
+			}
+		} else {
+			data = args[0]
+		}
+	}
+
+	if count != nil {
+		if data == nil {
+			data = map[string]interface{}{"Count": count}
+		} else {
+			dataMap := toMap(data)
+			dataMap["Count"] = count
+			data = dataMap
+		}
+	}
+
+	p, _ := lang.Plural(count)
+	template := translation.Template(p)
+	if template == nil {
+		return translationID
+	}
+
+	s := template.Execute(data)
+	if s == "" {
+		return translationID
+	}
+	return s
+}
+
+func isNumber(n interface{}) bool {
+	switch n.(type) {
+	case int, int8, int16, int32, int64, string:
+		return true
+	}
+	return false
+}
+
+func toMap(input interface{}) map[string]interface{} {
+	if data, ok := input.(map[string]interface{}); ok {
+		return data
+	}
+	v := reflect.ValueOf(input)
+	switch v.Kind() {
+	case reflect.Ptr:
+		return toMap(v.Elem().Interface())
+	case reflect.Struct:
+		return structToMap(v)
+	default:
+		return nil
+	}
+}
+
+// Converts the top level of a struct to a map[string]interface{}.
+// Code inspired by github.com/fatih/structs.
+func structToMap(v reflect.Value) map[string]interface{} {
+	out := make(map[string]interface{})
+	t := v.Type()
+	for i := 0; i < t.NumField(); i++ {
+		field := t.Field(i)
+		if field.PkgPath != "" {
+			// unexported field. skip.
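+			// (reflect exposes a non-empty PkgPath only for unexported fields)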
+ continue + } + out[field.Name] = v.FieldByName(field.Name).Interface() + } + return out +} diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/i18n.go b/vendor/github.com/nicksnyder/go-i18n/i18n/i18n.go new file mode 100644 index 00000000..f9684296 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/i18n.go @@ -0,0 +1,152 @@ +// Package i18n supports string translations with variable substitution and CLDR pluralization. +// It is intended to be used in conjunction with the goi18n command, although that is not strictly required. +// +// Initialization +// +// Your Go program should load translations during its initialization. +// i18n.MustLoadTranslationFile("path/to/fr-FR.all.json") +// If your translations are in a file format not supported by (Must)?LoadTranslationFile, +// then you can use the AddTranslation function to manually add translations. +// +// Fetching a translation +// +// Use Tfunc or MustTfunc to fetch a TranslateFunc that will return the translated string for a specific language. +// func handleRequest(w http.ResponseWriter, r *http.Request) { +// cookieLang := r.Cookie("lang") +// acceptLang := r.Header.Get("Accept-Language") +// defaultLang = "en-US" // known valid language +// T, err := i18n.Tfunc(cookieLang, acceptLang, defaultLang) +// fmt.Println(T("Hello world")) +// } +// +// Usually it is a good idea to identify strings by a generic id rather than the English translation, +// but the rest of this documentation will continue to use the English translation for readability. +// T("Hello world") // ok +// T("programGreeting") // better! +// +// Variables +// +// TranslateFunc supports strings that have variables using the text/template syntax. +// T("Hello {{.Person}}", map[string]interface{}{ +// "Person": "Bob", +// }) +// +// Pluralization +// +// TranslateFunc supports the pluralization of strings using the CLDR pluralization rules defined here: +// http://www.unicode.org/cldr/charts/latest/supplemental/language_plural_rules.html +// T("You have {{.Count}} unread emails.", 2) +// T("I am {{.Count}} meters tall.", "1.7") +// +// Plural strings may also have variables. +// T("{{.Person}} has {{.Count}} unread emails", 2, map[string]interface{}{ +// "Person": "Bob", +// }) +// +// Sentences with multiple plural components can be supported with nesting. +// T("{{.Person}} has {{.Count}} unread emails in the past {{.Timeframe}}.", 3, map[string]interface{}{ +// "Person": "Bob", +// "Timeframe": T("{{.Count}} days", 2), +// }) +// +// Templates +// +// You can use the .Funcs() method of a text/template or html/template to register a TranslateFunc +// for usage inside of that template. +package i18n + +import ( + "github.com/nicksnyder/go-i18n/i18n/bundle" + "github.com/nicksnyder/go-i18n/i18n/language" + "github.com/nicksnyder/go-i18n/i18n/translation" +) + +// TranslateFunc returns the translation of the string identified by translationID. +// +// If there is no translation for translationID, then the translationID itself is returned. +// This makes it easy to identify missing translations in your app. +// +// If translationID is a non-plural form, then the first variadic argument may be a map[string]interface{} +// or struct that contains template data. +// +// If translationID is a plural form, then the first variadic argument must be an integer type +// (int, int8, int16, int32, int64) or a float formatted as a string (e.g. "123.45"). +// The second variadic argument may be a map[string]interface{} or struct that contains template data. 
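+//
+// For example (a sketch; the translation IDs here are hypothetical and must
+// exist in a loaded translation file):
+//
+//   T, _ := i18n.Tfunc("en-US")
+//   T("program_greeting")                                       // simple lookup
+//   T("your_unread_email_count", 2)                             // plural form
+//   T("hello_person", map[string]interface{}{"Person": "Bob"})  // template data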
+type TranslateFunc func(translationID string, args ...interface{}) string + +// IdentityTfunc returns a TranslateFunc that always returns the translationID passed to it. +// +// It is a useful placeholder when parsing a text/template or html/template +// before the actual Tfunc is available. +func IdentityTfunc() TranslateFunc { + return func(translationID string, args ...interface{}) string { + return translationID + } +} + +var defaultBundle = bundle.New() + +// MustLoadTranslationFile is similar to LoadTranslationFile +// except it panics if an error happens. +func MustLoadTranslationFile(filename string) { + defaultBundle.MustLoadTranslationFile(filename) +} + +// LoadTranslationFile loads the translations from filename into memory. +// +// The language that the translations are associated with is parsed from the filename (e.g. en-US.json). +// +// Generally you should load translation files once during your program's initialization. +func LoadTranslationFile(filename string) error { + return defaultBundle.LoadTranslationFile(filename) +} + +// ParseTranslationFileBytes is similar to LoadTranslationFile except it parses the bytes in buf. +// +// It is useful for parsing translation files embedded with go-bindata. +func ParseTranslationFileBytes(filename string, buf []byte) error { + return defaultBundle.ParseTranslationFileBytes(filename, buf) +} + +// AddTranslation adds translations for a language. +// +// It is useful if your translations are in a format not supported by LoadTranslationFile. +func AddTranslation(lang *language.Language, translations ...translation.Translation) { + defaultBundle.AddTranslation(lang, translations...) +} + +// LanguageTags returns the tags of all languages that have been added. +func LanguageTags() []string { + return defaultBundle.LanguageTags() +} + +// LanguageTranslationIDs returns the ids of all translations that have been added for a given language. +func LanguageTranslationIDs(languageTag string) []string { + return defaultBundle.LanguageTranslationIDs(languageTag) +} + +// MustTfunc is similar to Tfunc except it panics if an error happens. +func MustTfunc(languageSource string, languageSources ...string) TranslateFunc { + return TranslateFunc(defaultBundle.MustTfunc(languageSource, languageSources...)) +} + +// Tfunc returns a TranslateFunc that will be bound to the first language which +// has a non-zero number of translations. +// +// It can parse languages from Accept-Language headers (RFC 2616). +func Tfunc(languageSource string, languageSources ...string) (TranslateFunc, error) { + tfunc, err := defaultBundle.Tfunc(languageSource, languageSources...) + return TranslateFunc(tfunc), err +} + +// MustTfuncAndLanguage is similar to TfuncAndLanguage except it panics if an error happens. +func MustTfuncAndLanguage(languageSource string, languageSources ...string) (TranslateFunc, *language.Language) { + tfunc, lang := defaultBundle.MustTfuncAndLanguage(languageSource, languageSources...) + return TranslateFunc(tfunc), lang +} + +// TfuncAndLanguage is similar to Tfunc except it also returns the language which TranslateFunc is bound to. +func TfuncAndLanguage(languageSource string, languageSources ...string) (TranslateFunc, *language.Language, error) { + tfunc, lang, err := defaultBundle.TfuncAndLanguage(languageSource, languageSources...) 
+ return TranslateFunc(tfunc), lang, err +} diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/main.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/main.go new file mode 100644 index 00000000..5d6b6ad4 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/main.go @@ -0,0 +1,132 @@ +package main + +import ( + "encoding/xml" + "flag" + "fmt" + "io/ioutil" + "os" + "text/template" +) + +var usage = `%[1]s generates Go code to support CLDR plural rules. + +Usage: %[1]s [options] + +Options: + +` + +func main() { + flag.Usage = func() { + fmt.Fprintf(os.Stderr, usage, os.Args[0]) + flag.PrintDefaults() + } + var in, cout, tout string + flag.StringVar(&in, "i", "plurals.xml", "the input XML file containing CLDR plural rules") + flag.StringVar(&cout, "cout", "", "the code output file") + flag.StringVar(&tout, "tout", "", "the test output file") + flag.BoolVar(&verbose, "v", false, "verbose output") + flag.Parse() + + buf, err := ioutil.ReadFile(in) + if err != nil { + fatalf("failed to read file: %s", err) + } + + var data SupplementalData + if err := xml.Unmarshal(buf, &data); err != nil { + fatalf("failed to unmarshal xml: %s", err) + } + + count := 0 + for _, pg := range data.PluralGroups { + count += len(pg.SplitLocales()) + } + infof("parsed %d locales", count) + + if cout != "" { + file := openWritableFile(cout) + if err := codeTemplate.Execute(file, data); err != nil { + fatalf("unable to execute code template because %s", err) + } else { + infof("generated %s", cout) + } + } else { + infof("not generating code file (use -cout)") + } + + if tout != "" { + file := openWritableFile(tout) + if err := testTemplate.Execute(file, data); err != nil { + fatalf("unable to execute test template because %s", err) + } else { + infof("generated %s", tout) + } + } else { + infof("not generating test file (use -tout)") + } +} + +func openWritableFile(name string) *os.File { + file, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + fatalf("failed to write file %s because %s", name, err) + } + return file +} + +var codeTemplate = template.Must(template.New("spec").Parse(`package language +// This file is generated by i18n/language/codegen/generate.sh + +func init() { +{{range .PluralGroups}} + registerPluralSpec({{printf "%#v" .SplitLocales}}, &PluralSpec{ + Plurals: newPluralSet({{range $i, $e := .PluralRules}}{{if $i}}, {{end}}{{$e.CountTitle}}{{end}}), + PluralFunc: func(ops *operands) Plural { {{range .PluralRules}}{{if .GoCondition}} + // {{.Condition}} + if {{.GoCondition}} { + return {{.CountTitle}} + }{{end}}{{end}} + return Other + }, + }){{end}} +} +`)) + +var testTemplate = template.Must(template.New("spec").Parse(`package language +// This file is generated by i18n/language/codegen/generate.sh + +import "testing" + +{{range .PluralGroups}} +func Test{{.Name}}(t *testing.T) { + var tests []pluralTest + {{range .PluralRules}} + {{if .IntegerExamples}}tests = appendIntegerTests(tests, {{.CountTitle}}, {{printf "%#v" .IntegerExamples}}){{end}} + {{if .DecimalExamples}}tests = appendDecimalTests(tests, {{.CountTitle}}, {{printf "%#v" .DecimalExamples}}){{end}} + {{end}} + locales := {{printf "%#v" .SplitLocales}} + for _, locale := range locales { + runTests(t, locale, tests) + } +} +{{end}} +`)) + +func infof(format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, format+"\n", args...) 
+}
+
+var verbose bool
+
+func verbosef(format string, args ...interface{}) {
+	if verbose {
+		infof(format, args...)
+	}
+}
+
+func fatalf(format string, args ...interface{}) {
+	infof("fatal: "+format+"\n", args...)
+	os.Exit(1)
+}
diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/xml.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/xml.go
new file mode 100644
index 00000000..9d39053c
--- /dev/null
+++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/codegen/xml.go
@@ -0,0 +1,143 @@
+package main
+
+import (
+	"encoding/xml"
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// SupplementalData is the top level struct of plurals.xml
+type SupplementalData struct {
+	XMLName      xml.Name      `xml:"supplementalData"`
+	PluralGroups []PluralGroup `xml:"plurals>pluralRules"`
+}
+
+// PluralGroup is a group of locales with the same plural rules.
+type PluralGroup struct {
+	Locales     string       `xml:"locales,attr"`
+	PluralRules []PluralRule `xml:"pluralRule"`
+}
+
+// Name returns a unique name for this plural group.
+func (pg *PluralGroup) Name() string {
+	n := strings.Title(pg.Locales)
+	return strings.Replace(n, " ", "", -1)
+}
+
+// SplitLocales returns all the locales in the PluralGroup as a slice.
+func (pg *PluralGroup) SplitLocales() []string {
+	return strings.Split(pg.Locales, " ")
+}
+
+// PluralRule is the rule for a single plural form.
+type PluralRule struct {
+	Count string `xml:"count,attr"`
+	Rule  string `xml:",innerxml"`
+}
+
+// CountTitle returns the title case of the PluralRule's count.
+func (pr *PluralRule) CountTitle() string {
+	return strings.Title(pr.Count)
+}
+
+// Condition returns the condition where the PluralRule applies.
+func (pr *PluralRule) Condition() string {
+	i := strings.Index(pr.Rule, "@")
+	return pr.Rule[:i]
+}
+
+// Examples returns the integer and decimal examples for the PluralRule.
+func (pr *PluralRule) Examples() (integer []string, decimal []string) {
+	ex := strings.Replace(pr.Rule, ", …", "", -1)
+	ddelim := "@decimal"
+	if i := strings.Index(ex, ddelim); i > 0 {
+		dex := strings.TrimSpace(ex[i+len(ddelim):])
+		decimal = strings.Split(dex, ", ")
+		ex = ex[:i]
+	}
+	idelim := "@integer"
+	if i := strings.Index(ex, idelim); i > 0 {
+		iex := strings.TrimSpace(ex[i+len(idelim):])
+		integer = strings.Split(iex, ", ")
+	}
+	return integer, decimal
+}
+
+// IntegerExamples returns the integer examples for the PluralRule.
+func (pr *PluralRule) IntegerExamples() []string {
+	integer, _ := pr.Examples()
+	return integer
+}
+
+// DecimalExamples returns the decimal examples for the PluralRule.
+func (pr *PluralRule) DecimalExamples() []string {
+	_, decimal := pr.Examples()
+	return decimal
+}
+
+var relationRegexp = regexp.MustCompile("([niftvw])(?: % ([0-9]+))? (!=|=)(.*)")
+
+// GoCondition converts the XML condition to valid Go code.
+func (pr *PluralRule) GoCondition() string {
+	var ors []string
+	for _, and := range strings.Split(pr.Condition(), "or") {
+		var ands []string
+		for _, relation := range strings.Split(and, "and") {
+			parts := relationRegexp.FindStringSubmatch(relation)
+			if parts == nil {
+				continue
+			}
+			lvar, lmod, op, rhs := strings.Title(parts[1]), parts[2], parts[3], strings.TrimSpace(parts[4])
+			if op == "=" {
+				op = "=="
+			}
+			lvar = "ops."
+ lvar + var rhor []string + var rany []string + for _, rh := range strings.Split(rhs, ",") { + if parts := strings.Split(rh, ".."); len(parts) == 2 { + from, to := parts[0], parts[1] + if lvar == "ops.N" { + if lmod != "" { + rhor = append(rhor, fmt.Sprintf("ops.NmodInRange(%s, %s, %s)", lmod, from, to)) + } else { + rhor = append(rhor, fmt.Sprintf("ops.NinRange(%s, %s)", from, to)) + } + } else if lmod != "" { + rhor = append(rhor, fmt.Sprintf("intInRange(%s %% %s, %s, %s)", lvar, lmod, from, to)) + } else { + rhor = append(rhor, fmt.Sprintf("intInRange(%s, %s, %s)", lvar, from, to)) + } + } else { + rany = append(rany, rh) + } + } + + if len(rany) > 0 { + rh := strings.Join(rany, ",") + if lvar == "ops.N" { + if lmod != "" { + rhor = append(rhor, fmt.Sprintf("ops.NmodEqualsAny(%s, %s)", lmod, rh)) + } else { + rhor = append(rhor, fmt.Sprintf("ops.NequalsAny(%s)", rh)) + } + } else if lmod != "" { + rhor = append(rhor, fmt.Sprintf("intEqualsAny(%s %% %s, %s)", lvar, lmod, rh)) + } else { + rhor = append(rhor, fmt.Sprintf("intEqualsAny(%s, %s)", lvar, rh)) + } + } + r := strings.Join(rhor, " || ") + if len(rhor) > 1 { + r = "(" + r + ")" + } + if op == "!=" { + r = "!" + r + } + ands = append(ands, r) + } + ors = append(ors, strings.Join(ands, " && ")) + } + return strings.Join(ors, " ||\n") +} diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/language.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/language.go new file mode 100644 index 00000000..9a155efc --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/language.go @@ -0,0 +1,99 @@ +// Package language defines languages that implement CLDR pluralization. +package language + +import ( + "fmt" + "strings" +) + +// Language is a written human language. +type Language struct { + // Tag uniquely identifies the language as defined by RFC 5646. + // + // Most language tags are a two character language code (ISO 639-1) + // optionally followed by a dash and a two character country code (ISO 3166-1). + // (e.g. en, pt-br) + Tag string + *PluralSpec +} + +func (l *Language) String() string { + return l.Tag +} + +// MatchingTags returns the set of language tags that map to this Language. +// e.g. "zh-hans-cn" yields {"zh", "zh-hans", "zh-hans-cn"} +// BUG: This should be computed once and stored as a field on Language for efficiency, +// but this would require changing how Languages are constructed. +func (l *Language) MatchingTags() []string { + parts := strings.Split(l.Tag, "-") + var prefix, matches []string + for _, part := range parts { + prefix = append(prefix, part) + match := strings.Join(prefix, "-") + matches = append(matches, match) + } + return matches +} + +// Parse returns a slice of supported languages found in src or nil if none are found. +// It can parse language tags and Accept-Language headers. 
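+// For example, "en-US,en;q=0.8" yields the languages tagged "en-us" and "en"
+// (tags are normalized to lower case with dashes).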
+func Parse(src string) []*Language {
+	var langs []*Language
+	start := 0
+	for end, chr := range src {
+		switch chr {
+		case ',', ';', '.':
+			tag := strings.TrimSpace(src[start:end])
+			if spec := getPluralSpec(tag); spec != nil {
+				langs = append(langs, &Language{NormalizeTag(tag), spec})
+			}
+			start = end + 1
+		}
+	}
+	if start > 0 {
+		tag := strings.TrimSpace(src[start:])
+		if spec := getPluralSpec(tag); spec != nil {
+			langs = append(langs, &Language{NormalizeTag(tag), spec})
+		}
+		return dedupe(langs)
+	}
+	if spec := getPluralSpec(src); spec != nil {
+		langs = append(langs, &Language{NormalizeTag(src), spec})
+	}
+	return langs
+}
+
+func dedupe(langs []*Language) []*Language {
+	found := make(map[string]struct{}, len(langs))
+	deduped := make([]*Language, 0, len(langs))
+	for _, lang := range langs {
+		if _, ok := found[lang.Tag]; !ok {
+			found[lang.Tag] = struct{}{}
+			deduped = append(deduped, lang)
+		}
+	}
+	return deduped
+}
+
+// MustParse is similar to Parse except it panics instead of returning a nil Language.
+func MustParse(src string) []*Language {
+	langs := Parse(src)
+	if len(langs) == 0 {
+		panic(fmt.Errorf("unable to parse language from %q", src))
+	}
+	return langs
+}
+
+// Add adds support for a new language.
+func Add(l *Language) {
+	tag := NormalizeTag(l.Tag)
+	pluralSpecs[tag] = l.PluralSpec
+}
+
+// NormalizeTag returns a language tag with all lower-case characters
+// and dashes "-" instead of underscores "_".
+func NormalizeTag(tag string) string {
+	tag = strings.ToLower(tag)
+	return strings.Replace(tag, "_", "-", -1)
}
diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/operands.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/operands.go
new file mode 100644
index 00000000..877bcc89
--- /dev/null
+++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/operands.go
@@ -0,0 +1,119 @@
+package language
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// http://unicode.org/reports/tr35/tr35-numbers.html#Operands
+type operands struct {
+	N float64 // absolute value of the source number (integer and decimals)
+	I int64   // integer digits of n
+	V int64   // number of visible fraction digits in n, with trailing zeros
+	W int64   // number of visible fraction digits in n, without trailing zeros
+	F int64   // visible fractional digits in n, with trailing zeros
+	T int64   // visible fractional digits in n, without trailing zeros
+}
+
+// NequalsAny returns true if o represents an integer equal to any of the arguments.
+func (o *operands) NequalsAny(any ...int64) bool {
+	for _, i := range any {
+		if o.I == i && o.T == 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// NmodEqualsAny returns true if o represents an integer equal to any of the arguments modulo mod.
+func (o *operands) NmodEqualsAny(mod int64, any ...int64) bool {
+	modI := o.I % mod
+	for _, i := range any {
+		if modI == i && o.T == 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// NinRange returns true if o represents an integer in the closed interval [from, to].
+func (o *operands) NinRange(from, to int64) bool {
+	return o.T == 0 && from <= o.I && o.I <= to
+}
+
+// NmodInRange returns true if o represents an integer in the closed interval [from, to] modulo mod.
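+// For example, with mod 10 the operands for 13 satisfy NmodInRange(10, 2, 4)
+// because 13 % 10 == 3.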
+func (o *operands) NmodInRange(mod, from, to int64) bool { + modI := o.I % mod + return o.T == 0 && from <= modI && modI <= to +} + +func newOperands(v interface{}) (*operands, error) { + switch v := v.(type) { + case int: + return newOperandsInt64(int64(v)), nil + case int8: + return newOperandsInt64(int64(v)), nil + case int16: + return newOperandsInt64(int64(v)), nil + case int32: + return newOperandsInt64(int64(v)), nil + case int64: + return newOperandsInt64(v), nil + case string: + return newOperandsString(v) + case float32, float64: + return nil, fmt.Errorf("floats should be formatted into a string") + default: + return nil, fmt.Errorf("invalid type %T; expected integer or string", v) + } +} + +func newOperandsInt64(i int64) *operands { + if i < 0 { + i = -i + } + return &operands{float64(i), i, 0, 0, 0, 0} +} + +func newOperandsString(s string) (*operands, error) { + if s[0] == '-' { + s = s[1:] + } + n, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, err + } + ops := &operands{N: n} + parts := strings.SplitN(s, ".", 2) + ops.I, err = strconv.ParseInt(parts[0], 10, 64) + if err != nil { + return nil, err + } + if len(parts) == 1 { + return ops, nil + } + fraction := parts[1] + ops.V = int64(len(fraction)) + for i := ops.V - 1; i >= 0; i-- { + if fraction[i] != '0' { + ops.W = i + 1 + break + } + } + if ops.V > 0 { + f, err := strconv.ParseInt(fraction, 10, 0) + if err != nil { + return nil, err + } + ops.F = f + } + if ops.W > 0 { + t, err := strconv.ParseInt(fraction[:ops.W], 10, 0) + if err != nil { + return nil, err + } + ops.T = t + } + return ops, nil +} diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/plural.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/plural.go new file mode 100644 index 00000000..1f3ea5c6 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/plural.go @@ -0,0 +1,40 @@ +package language + +import ( + "fmt" +) + +// Plural represents a language pluralization form as defined here: +// http://cldr.unicode.org/index/cldr-spec/plural-rules +type Plural string + +// All defined plural categories. +const ( + Invalid Plural = "invalid" + Zero = "zero" + One = "one" + Two = "two" + Few = "few" + Many = "many" + Other = "other" +) + +// NewPlural returns src as a Plural +// or Invalid and a non-nil error if src is not a valid Plural. +func NewPlural(src string) (Plural, error) { + switch src { + case "zero": + return Zero, nil + case "one": + return One, nil + case "two": + return Two, nil + case "few": + return Few, nil + case "many": + return Many, nil + case "other": + return Other, nil + } + return Invalid, fmt.Errorf("invalid plural category %s", src) +} diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec.go new file mode 100644 index 00000000..fc352268 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec.go @@ -0,0 +1,74 @@ +package language + +import "strings" + +// PluralSpec defines the CLDR plural rules for a language. 
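+// For example, English needs only the One and Other categories, while Russian
+// also distinguishes Few and Many.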
+// http://www.unicode.org/cldr/charts/latest/supplemental/language_plural_rules.html +// http://unicode.org/reports/tr35/tr35-numbers.html#Operands +type PluralSpec struct { + Plurals map[Plural]struct{} + PluralFunc func(*operands) Plural +} + +var pluralSpecs = make(map[string]*PluralSpec) + +func normalizePluralSpecID(id string) string { + id = strings.Replace(id, "_", "-", -1) + id = strings.ToLower(id) + return id +} + +func registerPluralSpec(ids []string, ps *PluralSpec) { + for _, id := range ids { + id = normalizePluralSpecID(id) + pluralSpecs[id] = ps + } +} + +// Plural returns the plural category for number as defined by +// the language's CLDR plural rules. +func (ps *PluralSpec) Plural(number interface{}) (Plural, error) { + ops, err := newOperands(number) + if err != nil { + return Invalid, err + } + return ps.PluralFunc(ops), nil +} + +// getPluralSpec returns the PluralSpec that matches the longest prefix of tag. +// It returns nil if no PluralSpec matches tag. +func getPluralSpec(tag string) *PluralSpec { + tag = NormalizeTag(tag) + subtag := tag + for { + if spec := pluralSpecs[subtag]; spec != nil { + return spec + } + end := strings.LastIndex(subtag, "-") + if end == -1 { + return nil + } + subtag = subtag[:end] + } +} + +func newPluralSet(plurals ...Plural) map[Plural]struct{} { + set := make(map[Plural]struct{}, len(plurals)) + for _, plural := range plurals { + set[plural] = struct{}{} + } + return set +} + +func intInRange(i, from, to int64) bool { + return from <= i && i <= to +} + +func intEqualsAny(i int64, any ...int64) bool { + for _, a := range any { + if i == a { + return true + } + } + return false +} diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen.go b/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen.go new file mode 100644 index 00000000..c9b4f266 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/language/pluralspec_gen.go @@ -0,0 +1,567 @@ +package language + +// This file is generated by i18n/language/codegen/generate.sh + +func init() { + + registerPluralSpec([]string{"bm", "bo", "dz", "id", "ig", "ii", "in", "ja", "jbo", "jv", "jw", "kde", "kea", "km", "ko", "lkt", "lo", "ms", "my", "nqo", "root", "sah", "ses", "sg", "th", "to", "vi", "wo", "yo", "zh"}, &PluralSpec{ + Plurals: newPluralSet(Other), + PluralFunc: func(ops *operands) Plural { + return Other + }, + }) + registerPluralSpec([]string{"am", "as", "bn", "fa", "gu", "hi", "kn", "mr", "zu"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *operands) Plural { + // i = 0 or n = 1 + if intEqualsAny(ops.I, 0) || + ops.NequalsAny(1) { + return One + } + return Other + }, + }) + registerPluralSpec([]string{"ff", "fr", "hy", "kab"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *operands) Plural { + // i = 0,1 + if intEqualsAny(ops.I, 0, 1) { + return One + } + return Other + }, + }) + registerPluralSpec([]string{"ast", "ca", "de", "en", "et", "fi", "fy", "gl", "it", "ji", "nl", "sv", "sw", "ur", "yi"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *operands) Plural { + // i = 1 and v = 0 + if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { + return One + } + return Other + }, + }) + registerPluralSpec([]string{"si"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *operands) Plural { + // n = 0,1 or i = 0 and f = 1 + if ops.NequalsAny(0, 1) || + intEqualsAny(ops.I, 0) && intEqualsAny(ops.F, 1) { + return One + } + return Other + }, + }) 
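+	// Illustrative sketch (not part of the upstream generated file): given
+	// the specs registered above, a caller resolves a spec via getPluralSpec
+	// (pluralspec.go) and asks it for a category:
+	//
+	//	spec := getPluralSpec("en")  // "en" is registered a few entries up
+	//	pc, _ := spec.Plural(1)      // One   (i = 1 and v = 0)
+	//	pc, _ = spec.Plural("1.50")  // Other (v != 0)
+	//
+	// Floats must be pre-formatted as strings so the visible fraction digits
+	// (the v, w, f and t operands) are unambiguous; newOperands rejects raw
+	// float arguments with an error.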
+ registerPluralSpec([]string{"ak", "bh", "guw", "ln", "mg", "nso", "pa", "ti", "wa"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *operands) Plural { + // n = 0..1 + if ops.NinRange(0, 1) { + return One + } + return Other + }, + }) + registerPluralSpec([]string{"tzm"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *operands) Plural { + // n = 0..1 or n = 11..99 + if ops.NinRange(0, 1) || + ops.NinRange(11, 99) { + return One + } + return Other + }, + }) + registerPluralSpec([]string{"pt"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *operands) Plural { + // n = 0..2 and n != 2 + if ops.NinRange(0, 2) && !ops.NequalsAny(2) { + return One + } + return Other + }, + }) + registerPluralSpec([]string{"af", "asa", "az", "bem", "bez", "bg", "brx", "ce", "cgg", "chr", "ckb", "dv", "ee", "el", "eo", "es", "eu", "fo", "fur", "gsw", "ha", "haw", "hu", "jgo", "jmc", "ka", "kaj", "kcg", "kk", "kkj", "kl", "ks", "ksb", "ku", "ky", "lb", "lg", "mas", "mgo", "ml", "mn", "nah", "nb", "nd", "ne", "nn", "nnh", "no", "nr", "ny", "nyn", "om", "or", "os", "pap", "ps", "rm", "rof", "rwk", "saq", "sdh", "seh", "sn", "so", "sq", "ss", "ssy", "st", "syr", "ta", "te", "teo", "tig", "tk", "tn", "tr", "ts", "ug", "uz", "ve", "vo", "vun", "wae", "xh", "xog"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *operands) Plural { + // n = 1 + if ops.NequalsAny(1) { + return One + } + return Other + }, + }) + registerPluralSpec([]string{"pt_PT"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *operands) Plural { + // n = 1 and v = 0 + if ops.NequalsAny(1) && intEqualsAny(ops.V, 0) { + return One + } + return Other + }, + }) + registerPluralSpec([]string{"da"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *operands) Plural { + // n = 1 or t != 0 and i = 0,1 + if ops.NequalsAny(1) || + !intEqualsAny(ops.T, 0) && intEqualsAny(ops.I, 0, 1) { + return One + } + return Other + }, + }) + registerPluralSpec([]string{"is"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *operands) Plural { + // t = 0 and i % 10 = 1 and i % 100 != 11 or t != 0 + if intEqualsAny(ops.T, 0) && intEqualsAny(ops.I%10, 1) && !intEqualsAny(ops.I%100, 11) || + !intEqualsAny(ops.T, 0) { + return One + } + return Other + }, + }) + registerPluralSpec([]string{"mk"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *operands) Plural { + // v = 0 and i % 10 = 1 or f % 10 = 1 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 1) || + intEqualsAny(ops.F%10, 1) { + return One + } + return Other + }, + }) + registerPluralSpec([]string{"fil", "tl"}, &PluralSpec{ + Plurals: newPluralSet(One, Other), + PluralFunc: func(ops *operands) Plural { + // v = 0 and i = 1,2,3 or v = 0 and i % 10 != 4,6,9 or v != 0 and f % 10 != 4,6,9 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I, 1, 2, 3) || + intEqualsAny(ops.V, 0) && !intEqualsAny(ops.I%10, 4, 6, 9) || + !intEqualsAny(ops.V, 0) && !intEqualsAny(ops.F%10, 4, 6, 9) { + return One + } + return Other + }, + }) + registerPluralSpec([]string{"lv", "prg"}, &PluralSpec{ + Plurals: newPluralSet(Zero, One, Other), + PluralFunc: func(ops *operands) Plural { + // n % 10 = 0 or n % 100 = 11..19 or v = 2 and f % 100 = 11..19 + if ops.NmodEqualsAny(10, 0) || + ops.NmodInRange(100, 11, 19) || + intEqualsAny(ops.V, 2) && intInRange(ops.F%100, 11, 19) { + return Zero + } + // n % 10 = 1 and n % 100 != 11 or v = 2 and f % 10 = 1 
and f % 100 != 11 or v != 2 and f % 10 = 1 + if ops.NmodEqualsAny(10, 1) && !ops.NmodEqualsAny(100, 11) || + intEqualsAny(ops.V, 2) && intEqualsAny(ops.F%10, 1) && !intEqualsAny(ops.F%100, 11) || + !intEqualsAny(ops.V, 2) && intEqualsAny(ops.F%10, 1) { + return One + } + return Other + }, + }) + registerPluralSpec([]string{"lag"}, &PluralSpec{ + Plurals: newPluralSet(Zero, One, Other), + PluralFunc: func(ops *operands) Plural { + // n = 0 + if ops.NequalsAny(0) { + return Zero + } + // i = 0,1 and n != 0 + if intEqualsAny(ops.I, 0, 1) && !ops.NequalsAny(0) { + return One + } + return Other + }, + }) + registerPluralSpec([]string{"ksh"}, &PluralSpec{ + Plurals: newPluralSet(Zero, One, Other), + PluralFunc: func(ops *operands) Plural { + // n = 0 + if ops.NequalsAny(0) { + return Zero + } + // n = 1 + if ops.NequalsAny(1) { + return One + } + return Other + }, + }) + registerPluralSpec([]string{"iu", "kw", "naq", "se", "sma", "smi", "smj", "smn", "sms"}, &PluralSpec{ + Plurals: newPluralSet(One, Two, Other), + PluralFunc: func(ops *operands) Plural { + // n = 1 + if ops.NequalsAny(1) { + return One + } + // n = 2 + if ops.NequalsAny(2) { + return Two + } + return Other + }, + }) + registerPluralSpec([]string{"shi"}, &PluralSpec{ + Plurals: newPluralSet(One, Few, Other), + PluralFunc: func(ops *operands) Plural { + // i = 0 or n = 1 + if intEqualsAny(ops.I, 0) || + ops.NequalsAny(1) { + return One + } + // n = 2..10 + if ops.NinRange(2, 10) { + return Few + } + return Other + }, + }) + registerPluralSpec([]string{"mo", "ro"}, &PluralSpec{ + Plurals: newPluralSet(One, Few, Other), + PluralFunc: func(ops *operands) Plural { + // i = 1 and v = 0 + if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { + return One + } + // v != 0 or n = 0 or n != 1 and n % 100 = 1..19 + if !intEqualsAny(ops.V, 0) || + ops.NequalsAny(0) || + !ops.NequalsAny(1) && ops.NmodInRange(100, 1, 19) { + return Few + } + return Other + }, + }) + registerPluralSpec([]string{"bs", "hr", "sh", "sr"}, &PluralSpec{ + Plurals: newPluralSet(One, Few, Other), + PluralFunc: func(ops *operands) Plural { + // v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 1) && !intEqualsAny(ops.I%100, 11) || + intEqualsAny(ops.F%10, 1) && !intEqualsAny(ops.F%100, 11) { + return One + } + // v = 0 and i % 10 = 2..4 and i % 100 != 12..14 or f % 10 = 2..4 and f % 100 != 12..14 + if intEqualsAny(ops.V, 0) && intInRange(ops.I%10, 2, 4) && !intInRange(ops.I%100, 12, 14) || + intInRange(ops.F%10, 2, 4) && !intInRange(ops.F%100, 12, 14) { + return Few + } + return Other + }, + }) + registerPluralSpec([]string{"gd"}, &PluralSpec{ + Plurals: newPluralSet(One, Two, Few, Other), + PluralFunc: func(ops *operands) Plural { + // n = 1,11 + if ops.NequalsAny(1, 11) { + return One + } + // n = 2,12 + if ops.NequalsAny(2, 12) { + return Two + } + // n = 3..10,13..19 + if ops.NinRange(3, 10) || ops.NinRange(13, 19) { + return Few + } + return Other + }, + }) + registerPluralSpec([]string{"sl"}, &PluralSpec{ + Plurals: newPluralSet(One, Two, Few, Other), + PluralFunc: func(ops *operands) Plural { + // v = 0 and i % 100 = 1 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%100, 1) { + return One + } + // v = 0 and i % 100 = 2 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%100, 2) { + return Two + } + // v = 0 and i % 100 = 3..4 or v != 0 + if intEqualsAny(ops.V, 0) && intInRange(ops.I%100, 3, 4) || + !intEqualsAny(ops.V, 0) { + return Few + } + return Other + }, + }) + 
registerPluralSpec([]string{"dsb", "hsb"}, &PluralSpec{ + Plurals: newPluralSet(One, Two, Few, Other), + PluralFunc: func(ops *operands) Plural { + // v = 0 and i % 100 = 1 or f % 100 = 1 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%100, 1) || + intEqualsAny(ops.F%100, 1) { + return One + } + // v = 0 and i % 100 = 2 or f % 100 = 2 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%100, 2) || + intEqualsAny(ops.F%100, 2) { + return Two + } + // v = 0 and i % 100 = 3..4 or f % 100 = 3..4 + if intEqualsAny(ops.V, 0) && intInRange(ops.I%100, 3, 4) || + intInRange(ops.F%100, 3, 4) { + return Few + } + return Other + }, + }) + registerPluralSpec([]string{"he", "iw"}, &PluralSpec{ + Plurals: newPluralSet(One, Two, Many, Other), + PluralFunc: func(ops *operands) Plural { + // i = 1 and v = 0 + if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { + return One + } + // i = 2 and v = 0 + if intEqualsAny(ops.I, 2) && intEqualsAny(ops.V, 0) { + return Two + } + // v = 0 and n != 0..10 and n % 10 = 0 + if intEqualsAny(ops.V, 0) && !ops.NinRange(0, 10) && ops.NmodEqualsAny(10, 0) { + return Many + } + return Other + }, + }) + registerPluralSpec([]string{"cs", "sk"}, &PluralSpec{ + Plurals: newPluralSet(One, Few, Many, Other), + PluralFunc: func(ops *operands) Plural { + // i = 1 and v = 0 + if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { + return One + } + // i = 2..4 and v = 0 + if intInRange(ops.I, 2, 4) && intEqualsAny(ops.V, 0) { + return Few + } + // v != 0 + if !intEqualsAny(ops.V, 0) { + return Many + } + return Other + }, + }) + registerPluralSpec([]string{"pl"}, &PluralSpec{ + Plurals: newPluralSet(One, Few, Many, Other), + PluralFunc: func(ops *operands) Plural { + // i = 1 and v = 0 + if intEqualsAny(ops.I, 1) && intEqualsAny(ops.V, 0) { + return One + } + // v = 0 and i % 10 = 2..4 and i % 100 != 12..14 + if intEqualsAny(ops.V, 0) && intInRange(ops.I%10, 2, 4) && !intInRange(ops.I%100, 12, 14) { + return Few + } + // v = 0 and i != 1 and i % 10 = 0..1 or v = 0 and i % 10 = 5..9 or v = 0 and i % 100 = 12..14 + if intEqualsAny(ops.V, 0) && !intEqualsAny(ops.I, 1) && intInRange(ops.I%10, 0, 1) || + intEqualsAny(ops.V, 0) && intInRange(ops.I%10, 5, 9) || + intEqualsAny(ops.V, 0) && intInRange(ops.I%100, 12, 14) { + return Many + } + return Other + }, + }) + registerPluralSpec([]string{"be"}, &PluralSpec{ + Plurals: newPluralSet(One, Few, Many, Other), + PluralFunc: func(ops *operands) Plural { + // n % 10 = 1 and n % 100 != 11 + if ops.NmodEqualsAny(10, 1) && !ops.NmodEqualsAny(100, 11) { + return One + } + // n % 10 = 2..4 and n % 100 != 12..14 + if ops.NmodInRange(10, 2, 4) && !ops.NmodInRange(100, 12, 14) { + return Few + } + // n % 10 = 0 or n % 10 = 5..9 or n % 100 = 11..14 + if ops.NmodEqualsAny(10, 0) || + ops.NmodInRange(10, 5, 9) || + ops.NmodInRange(100, 11, 14) { + return Many + } + return Other + }, + }) + registerPluralSpec([]string{"lt"}, &PluralSpec{ + Plurals: newPluralSet(One, Few, Many, Other), + PluralFunc: func(ops *operands) Plural { + // n % 10 = 1 and n % 100 != 11..19 + if ops.NmodEqualsAny(10, 1) && !ops.NmodInRange(100, 11, 19) { + return One + } + // n % 10 = 2..9 and n % 100 != 11..19 + if ops.NmodInRange(10, 2, 9) && !ops.NmodInRange(100, 11, 19) { + return Few + } + // f != 0 + if !intEqualsAny(ops.F, 0) { + return Many + } + return Other + }, + }) + registerPluralSpec([]string{"mt"}, &PluralSpec{ + Plurals: newPluralSet(One, Few, Many, Other), + PluralFunc: func(ops *operands) Plural { + // n = 1 + if ops.NequalsAny(1) { + return One + } + // n = 0 or n 
% 100 = 2..10 + if ops.NequalsAny(0) || + ops.NmodInRange(100, 2, 10) { + return Few + } + // n % 100 = 11..19 + if ops.NmodInRange(100, 11, 19) { + return Many + } + return Other + }, + }) + registerPluralSpec([]string{"ru", "uk"}, &PluralSpec{ + Plurals: newPluralSet(One, Few, Many, Other), + PluralFunc: func(ops *operands) Plural { + // v = 0 and i % 10 = 1 and i % 100 != 11 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 1) && !intEqualsAny(ops.I%100, 11) { + return One + } + // v = 0 and i % 10 = 2..4 and i % 100 != 12..14 + if intEqualsAny(ops.V, 0) && intInRange(ops.I%10, 2, 4) && !intInRange(ops.I%100, 12, 14) { + return Few + } + // v = 0 and i % 10 = 0 or v = 0 and i % 10 = 5..9 or v = 0 and i % 100 = 11..14 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 0) || + intEqualsAny(ops.V, 0) && intInRange(ops.I%10, 5, 9) || + intEqualsAny(ops.V, 0) && intInRange(ops.I%100, 11, 14) { + return Many + } + return Other + }, + }) + registerPluralSpec([]string{"br"}, &PluralSpec{ + Plurals: newPluralSet(One, Two, Few, Many, Other), + PluralFunc: func(ops *operands) Plural { + // n % 10 = 1 and n % 100 != 11,71,91 + if ops.NmodEqualsAny(10, 1) && !ops.NmodEqualsAny(100, 11, 71, 91) { + return One + } + // n % 10 = 2 and n % 100 != 12,72,92 + if ops.NmodEqualsAny(10, 2) && !ops.NmodEqualsAny(100, 12, 72, 92) { + return Two + } + // n % 10 = 3..4,9 and n % 100 != 10..19,70..79,90..99 + if (ops.NmodInRange(10, 3, 4) || ops.NmodEqualsAny(10, 9)) && !(ops.NmodInRange(100, 10, 19) || ops.NmodInRange(100, 70, 79) || ops.NmodInRange(100, 90, 99)) { + return Few + } + // n != 0 and n % 1000000 = 0 + if !ops.NequalsAny(0) && ops.NmodEqualsAny(1000000, 0) { + return Many + } + return Other + }, + }) + registerPluralSpec([]string{"ga"}, &PluralSpec{ + Plurals: newPluralSet(One, Two, Few, Many, Other), + PluralFunc: func(ops *operands) Plural { + // n = 1 + if ops.NequalsAny(1) { + return One + } + // n = 2 + if ops.NequalsAny(2) { + return Two + } + // n = 3..6 + if ops.NinRange(3, 6) { + return Few + } + // n = 7..10 + if ops.NinRange(7, 10) { + return Many + } + return Other + }, + }) + registerPluralSpec([]string{"gv"}, &PluralSpec{ + Plurals: newPluralSet(One, Two, Few, Many, Other), + PluralFunc: func(ops *operands) Plural { + // v = 0 and i % 10 = 1 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 1) { + return One + } + // v = 0 and i % 10 = 2 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%10, 2) { + return Two + } + // v = 0 and i % 100 = 0,20,40,60,80 + if intEqualsAny(ops.V, 0) && intEqualsAny(ops.I%100, 0, 20, 40, 60, 80) { + return Few + } + // v != 0 + if !intEqualsAny(ops.V, 0) { + return Many + } + return Other + }, + }) + registerPluralSpec([]string{"ar"}, &PluralSpec{ + Plurals: newPluralSet(Zero, One, Two, Few, Many, Other), + PluralFunc: func(ops *operands) Plural { + // n = 0 + if ops.NequalsAny(0) { + return Zero + } + // n = 1 + if ops.NequalsAny(1) { + return One + } + // n = 2 + if ops.NequalsAny(2) { + return Two + } + // n % 100 = 3..10 + if ops.NmodInRange(100, 3, 10) { + return Few + } + // n % 100 = 11..99 + if ops.NmodInRange(100, 11, 99) { + return Many + } + return Other + }, + }) + registerPluralSpec([]string{"cy"}, &PluralSpec{ + Plurals: newPluralSet(Zero, One, Two, Few, Many, Other), + PluralFunc: func(ops *operands) Plural { + // n = 0 + if ops.NequalsAny(0) { + return Zero + } + // n = 1 + if ops.NequalsAny(1) { + return One + } + // n = 2 + if ops.NequalsAny(2) { + return Two + } + // n = 3 + if ops.NequalsAny(3) { + return Few + } + // n = 6 + 
if ops.NequalsAny(6) { + return Many + } + return Other + }, + }) +} diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/translation/plural_translation.go b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/plural_translation.go new file mode 100644 index 00000000..4f579d16 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/plural_translation.go @@ -0,0 +1,78 @@ +package translation + +import ( + "github.com/nicksnyder/go-i18n/i18n/language" +) + +type pluralTranslation struct { + id string + templates map[language.Plural]*template +} + +func (pt *pluralTranslation) MarshalInterface() interface{} { + return map[string]interface{}{ + "id": pt.id, + "translation": pt.templates, + } +} + +func (pt *pluralTranslation) ID() string { + return pt.id +} + +func (pt *pluralTranslation) Template(pc language.Plural) *template { + return pt.templates[pc] +} + +func (pt *pluralTranslation) UntranslatedCopy() Translation { + return &pluralTranslation{pt.id, make(map[language.Plural]*template)} +} + +func (pt *pluralTranslation) Normalize(l *language.Language) Translation { + // Delete plural categories that don't belong to this language. + for pc := range pt.templates { + if _, ok := l.Plurals[pc]; !ok { + delete(pt.templates, pc) + } + } + // Create map entries for missing valid categories. + for pc := range l.Plurals { + if _, ok := pt.templates[pc]; !ok { + pt.templates[pc] = mustNewTemplate("") + } + } + return pt +} + +func (pt *pluralTranslation) Backfill(src Translation) Translation { + for pc, t := range pt.templates { + if t == nil || t.src == "" { + pt.templates[pc] = src.Template(language.Other) + } + } + return pt +} + +func (pt *pluralTranslation) Merge(t Translation) Translation { + other, ok := t.(*pluralTranslation) + if !ok || pt.ID() != t.ID() { + return t + } + for pluralCategory, template := range other.templates { + if template != nil && template.src != "" { + pt.templates[pluralCategory] = template + } + } + return pt +} + +func (pt *pluralTranslation) Incomplete(l *language.Language) bool { + for pc := range l.Plurals { + if t := pt.templates[pc]; t == nil || t.src == "" { + return true + } + } + return false +} + +var _ = Translation(&pluralTranslation{}) diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/translation/single_translation.go b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/single_translation.go new file mode 100644 index 00000000..1010e594 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/single_translation.go @@ -0,0 +1,57 @@ +package translation + +import ( + "github.com/nicksnyder/go-i18n/i18n/language" +) + +type singleTranslation struct { + id string + template *template +} + +func (st *singleTranslation) MarshalInterface() interface{} { + return map[string]interface{}{ + "id": st.id, + "translation": st.template, + } +} + +func (st *singleTranslation) ID() string { + return st.id +} + +func (st *singleTranslation) Template(pc language.Plural) *template { + return st.template +} + +func (st *singleTranslation) UntranslatedCopy() Translation { + return &singleTranslation{st.id, mustNewTemplate("")} +} + +func (st *singleTranslation) Normalize(language *language.Language) Translation { + return st +} + +func (st *singleTranslation) Backfill(src Translation) Translation { + if st.template == nil || st.template.src == "" { + st.template = src.Template(language.Other) + } + return st +} + +func (st *singleTranslation) Merge(t Translation) Translation { + other, ok := t.(*singleTranslation) + if !ok || 
st.ID() != t.ID() { + return t + } + if other.template != nil && other.template.src != "" { + st.template = other.template + } + return st +} + +func (st *singleTranslation) Incomplete(l *language.Language) bool { + return st.template == nil || st.template.src == "" +} + +var _ = Translation(&singleTranslation{}) diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/translation/template.go b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/template.go new file mode 100644 index 00000000..c8756fa4 --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/template.go @@ -0,0 +1,61 @@ +package translation + +import ( + "bytes" + "encoding" + "strings" + gotemplate "text/template" +) + +type template struct { + tmpl *gotemplate.Template + src string +} + +func newTemplate(src string) (*template, error) { + var tmpl template + err := tmpl.parseTemplate(src) + return &tmpl, err +} + +func mustNewTemplate(src string) *template { + t, err := newTemplate(src) + if err != nil { + panic(err) + } + return t +} + +func (t *template) String() string { + return t.src +} + +func (t *template) Execute(args interface{}) string { + if t.tmpl == nil { + return t.src + } + var buf bytes.Buffer + if err := t.tmpl.Execute(&buf, args); err != nil { + return err.Error() + } + return buf.String() +} + +func (t *template) MarshalText() ([]byte, error) { + return []byte(t.src), nil +} + +func (t *template) UnmarshalText(src []byte) error { + return t.parseTemplate(string(src)) +} + +func (t *template) parseTemplate(src string) (err error) { + t.src = src + if strings.Contains(src, "{{") { + t.tmpl, err = gotemplate.New(src).Parse(src) + } + return +} + +var _ = encoding.TextMarshaler(&template{}) +var _ = encoding.TextUnmarshaler(&template{}) diff --git a/vendor/github.com/nicksnyder/go-i18n/i18n/translation/translation.go b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/translation.go new file mode 100644 index 00000000..fa93180b --- /dev/null +++ b/vendor/github.com/nicksnyder/go-i18n/i18n/translation/translation.go @@ -0,0 +1,83 @@ +// Package translation defines the interface for a translation. +package translation + +import ( + "fmt" + + "github.com/nicksnyder/go-i18n/i18n/language" +) + +// Translation is the interface that represents a translated string. +type Translation interface { + // MarshalInterface returns the object that should be used + // to serialize the translation. + MarshalInterface() interface{} + ID() string + Template(language.Plural) *template + UntranslatedCopy() Translation + Normalize(language *language.Language) Translation + Backfill(src Translation) Translation + Merge(Translation) Translation + Incomplete(l *language.Language) bool +} + +// SortableByID implements sort.Interface for a slice of translations. +type SortableByID []Translation + +func (a SortableByID) Len() int { return len(a) } +func (a SortableByID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a SortableByID) Less(i, j int) bool { return a[i].ID() < a[j].ID() } + +// NewTranslation reflects on data to create a new Translation. +// +// data["id"] must be a string and data["translation"] must be either a string +// for a non-plural translation or a map[string]interface{} for a plural translation. 
+func NewTranslation(data map[string]interface{}) (Translation, error) { + id, ok := data["id"].(string) + if !ok { + return nil, fmt.Errorf(`missing "id" key`) + } + var pluralObject map[string]interface{} + switch translation := data["translation"].(type) { + case string: + tmpl, err := newTemplate(translation) + if err != nil { + return nil, err + } + return &singleTranslation{id, tmpl}, nil + case map[interface{}]interface{}: + // The YAML parser uses interface{} keys so we first convert them to string keys. + pluralObject = make(map[string]interface{}) + for k, v := range translation { + kStr, ok := k.(string) + if !ok { + return nil, fmt.Errorf(`invalid plural category type %T; expected string`, k) + } + pluralObject[kStr] = v + } + case map[string]interface{}: + pluralObject = translation + case nil: + return nil, fmt.Errorf(`missing "translation" key`) + default: + return nil, fmt.Errorf(`unsupported type for "translation" key %T`, translation) + } + + templates := make(map[language.Plural]*template, len(pluralObject)) + for k, v := range pluralObject { + pc, err := language.NewPlural(k) + if err != nil { + return nil, err + } + str, ok := v.(string) + if !ok { + return nil, fmt.Errorf(`plural category "%s" has value of type %T; expected string`, pc, v) + } + tmpl, err := newTemplate(str) + if err != nil { + return nil, err + } + templates[pc] = tmpl + } + return &pluralTranslation{id, templates}, nil +} diff --git a/vendor/github.com/pborman/uuid/LICENSE b/vendor/github.com/pborman/uuid/LICENSE new file mode 100644 index 00000000..5dc68268 --- /dev/null +++ b/vendor/github.com/pborman/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pborman/uuid/dce.go b/vendor/github.com/pborman/uuid/dce.go new file mode 100644 index 00000000..50a0f2d0 --- /dev/null +++ b/vendor/github.com/pborman/uuid/dce.go @@ -0,0 +1,84 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package uuid
+
+import (
+	"encoding/binary"
+	"fmt"
+	"os"
+)
+
+// A Domain represents a Version 2 domain.
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+	Person = Domain(0)
+	Group  = Domain(1)
+	Org    = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group. The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) UUID {
+	uuid := NewUUID()
+	if uuid != nil {
+		uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+		uuid[9] = byte(domain)
+		binary.BigEndian.PutUint32(uuid[0:], id)
+	}
+	return uuid
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+//	NewDCESecurity(Person, uint32(os.Getuid()))
+func NewDCEPerson() UUID {
+	return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+//	NewDCESecurity(Group, uint32(os.Getgid()))
+func NewDCEGroup() UUID {
+	return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID. It returns false if
+// uuid is not a Version 2 UUID.
+func (uuid UUID) Domain() (Domain, bool) {
+	if v, _ := uuid.Version(); v != 2 {
+		return 0, false
+	}
+	return Domain(uuid[9]), true
+}
+
+// Id returns the id for a Version 2 UUID. It returns false if uuid is
+// not a Version 2 UUID.
+func (uuid UUID) Id() (uint32, bool) {
+	if v, _ := uuid.Version(); v != 2 {
+		return 0, false
+	}
+	return binary.BigEndian.Uint32(uuid[0:4]), true
+}
+
+func (d Domain) String() string {
+	switch d {
+	case Person:
+		return "Person"
+	case Group:
+		return "Group"
+	case Org:
+		return "Org"
+	}
+	return fmt.Sprintf("Domain%d", int(d))
+}
diff --git a/vendor/github.com/pborman/uuid/doc.go b/vendor/github.com/pborman/uuid/doc.go
new file mode 100644
index 00000000..d8bd013e
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The uuid package generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services.
+package uuid
diff --git a/vendor/github.com/pborman/uuid/hash.go b/vendor/github.com/pborman/uuid/hash.go
new file mode 100644
index 00000000..a0420c1e
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/hash.go
@@ -0,0 +1,53 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"crypto/md5"
+	"crypto/sha1"
+	"hash"
+)
+
+// Well known Name Space IDs and UUIDs.
+var (
+	NameSpace_DNS  = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+	NameSpace_URL  = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
+	NameSpace_OID  = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
+	NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+	NIL            = Parse("00000000-0000-0000-0000-000000000000")
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h. The hash should be at least 16 bytes in length. The
+// first 16 bytes of the hash are used to form the UUID.
The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space) + h.Write([]byte(data)) + s := h.Sum(nil) + uuid := make([]byte, 16) + copy(uuid, s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/pborman/uuid/json.go b/vendor/github.com/pborman/uuid/json.go new file mode 100644 index 00000000..9dda1dfb --- /dev/null +++ b/vendor/github.com/pborman/uuid/json.go @@ -0,0 +1,34 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "errors" + +func (u UUID) MarshalJSON() ([]byte, error) { + if len(u) != 16 { + return []byte(`""`), nil + } + var js [38]byte + js[0] = '"' + encodeHex(js[1:], u) + js[37] = '"' + return js[:], nil +} + +func (u *UUID) UnmarshalJSON(data []byte) error { + if string(data) == `""` { + return nil + } + if data[0] != '"' { + return errors.New("invalid UUID format") + } + data = data[1 : len(data)-1] + uu := Parse(string(data)) + if uu == nil { + return errors.New("invalid UUID format") + } + *u = uu + return nil +} diff --git a/vendor/github.com/pborman/uuid/node.go b/vendor/github.com/pborman/uuid/node.go new file mode 100644 index 00000000..42d60da8 --- /dev/null +++ b/vendor/github.com/pborman/uuid/node.go @@ -0,0 +1,117 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "net" + "sync" +) + +var ( + nodeMu sync.Mutex + interfaces []net.Interface // cached list of interfaces + ifname string // name of interface being used + nodeID []byte // hardware for version 1 UUIDs +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil && name != "" { + return false + } + } + + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + if setNodeID(ifs.HardwareAddr) { + ifname = ifs.Name + return true + } + } + } + + // We found no interfaces with a valid hardware address. 
If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + if nodeID == nil { + nodeID = make([]byte, 6) + } + randomBits(nodeID) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == nil { + setNodeInterface("") + } + nid := make([]byte, 6) + copy(nid, nodeID) + return nid +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + if setNodeID(id) { + ifname = "user" + return true + } + return false +} + +func setNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + if nodeID == nil { + nodeID = make([]byte, 6) + } + copy(nodeID, id) + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + if len(uuid) != 16 { + return nil + } + node := make([]byte, 6) + copy(node, uuid[10:]) + return node +} diff --git a/vendor/github.com/pborman/uuid/sql.go b/vendor/github.com/pborman/uuid/sql.go new file mode 100644 index 00000000..d015bfd1 --- /dev/null +++ b/vendor/github.com/pborman/uuid/sql.go @@ -0,0 +1,66 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "errors" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src.(type) { + case string: + // if an empty UUID comes from a table, we return a null UUID + if src.(string) == "" { + return nil + } + + // see uuid.Parse for required string format + parsed := Parse(src.(string)) + + if parsed == nil { + return errors.New("Scan: invalid UUID format") + } + + *uuid = parsed + case []byte: + b := src.([]byte) + + // if an empty UUID comes from a table, we return a null UUID + if len(b) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(b) == 16 { + *uuid = UUID(b) + } else { + u := Parse(string(b)) + + if u == nil { + return errors.New("Scan: invalid UUID format") + } + + *uuid = u + } + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go new file mode 100644 index 00000000..eedf2421 --- /dev/null +++ b/vendor/github.com/pborman/uuid/time.go @@ -0,0 +1,132 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package uuid
+
+import (
+	"encoding/binary"
+	"sync"
+	"time"
+)
+
+// A Time represents a time as the number of 100s of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+	lillian    = 2299160          // Julian day of 15 Oct 1582
+	unix       = 2440587          // Julian day of 1 Jan 1970
+	epoch      = unix - lillian   // Days between epochs
+	g1582      = epoch * 86400    // seconds between epochs
+	g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
+)
+
+var (
+	timeMu    sync.Mutex
+	lasttime  uint64 // last time we returned
+	clock_seq uint16 // clock sequence for this run
+
+	timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds using the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+	sec = int64(t - g1582ns100)
+	nsec = (sec % 10000000) * 100
+	sec /= 10000000
+	return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed. An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+	t := timeNow()
+
+	// If we don't have a clock sequence already, set one.
+	if clock_seq == 0 {
+		setClockSequence(-1)
+	}
+	now := uint64(t.UnixNano()/100) + g1582ns100
+
+	// If time has gone backwards with this clock sequence then we
+	// increment the clock sequence
+	if now <= lasttime {
+		clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
+	}
+	lasttime = now
+	return Time(now), clock_seq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set. The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated. Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
+func ClockSequence() int {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	return clockSequence()
+}
+
+func clockSequence() int {
+	if clock_seq == 0 {
+		setClockSequence(-1)
+	}
+	return int(clock_seq & 0x3fff)
+}
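+
+// Illustrative sketch (not part of the upstream file): GetTime and the clock
+// sequence combine to keep Version 1 UUIDs unique within a process. A round
+// trip back to wall-clock time looks roughly like:
+//
+//	now, seq, _ := GetTime()    // 100ns ticks since 15 Oct 1582, clock seq
+//	sec, nsec := now.UnixTime() // back to the Unix epoch of 1 Jan 1970
+//	_ = time.Unix(sec, nsec)    // the wall-clock moment GetTime observed
+//	_ = seq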
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
+// -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+	if seq == -1 {
+		var b [2]byte
+		randomBits(b[:]) // clock sequence
+		seq = int(b[0])<<8 | int(b[1])
+	}
+	old_seq := clock_seq
+	clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+	if old_seq != clock_seq {
+		lasttime = 0
+	}
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid. It returns false if uuid is not valid. The time is only well defined
+// for version 1 and 2 UUIDs.
+func (uuid UUID) Time() (Time, bool) {
+	if len(uuid) != 16 {
+		return 0, false
+	}
+	time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+	time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+	time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+	return Time(time), true
+}
+
+// ClockSequence returns the clock sequence encoded in uuid. It returns false
+// if uuid is not valid. The clock sequence is only well defined for version 1
+// and 2 UUIDs.
+func (uuid UUID) ClockSequence() (int, bool) {
+	if len(uuid) != 16 {
+		return 0, false
+	}
+	return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
+}
diff --git a/vendor/github.com/pborman/uuid/util.go b/vendor/github.com/pborman/uuid/util.go
new file mode 100644
index 00000000..fc8e052c
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/util.go
@@ -0,0 +1,43 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+	if _, err := io.ReadFull(rander, b); err != nil {
+		panic(err.Error()) // rand should never fail
+	}
+}
+
+// xvalues returns the value of a byte as a hexadecimal digit or 255.
+var xvalues = [256]byte{
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts the first two hex bytes of x into a byte.
+func xtob(x string) (byte, bool) {
+	b1 := xvalues[x[0]]
+	b2 := xvalues[x[1]]
+	return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
diff --git a/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/pborman/uuid/uuid.go
new file mode 100644
index 00000000..82c9e7ee
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/uuid.go
@@ -0,0 +1,201 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// Array is a pass-by-value UUID that can be used as an efficient key in a map.
+type Array [16]byte
+
+// UUID converts uuid into a slice.
+func (uuid Array) UUID() UUID {
+	return uuid[:]
+}
+
+// String returns the string representation of uuid,
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
+func (uuid Array) String() string {
+	return uuid.UUID().String()
+}
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID []byte
+
+// A Version represents a UUID's version.
+type Version byte
+
+// A Variant represents a UUID's variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+	Invalid   = Variant(iota) // Invalid UUID
+	RFC4122                   // The variant specified in RFC4122
+	Reserved                  // Reserved, NCS backward compatibility.
+	Microsoft                 // Reserved, Microsoft Corporation backward compatibility.
+	Future                    // Reserved for future definition.
+)
+
+var rander = rand.Reader // random function
+
+// New returns a new random (version 4) UUID as a string. It is a convenience
+// function for NewRandom().String().
+func New() string {
+	return NewRandom().String()
+}
+
+// Parse decodes s into a UUID or returns nil. Both the UUID form of
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
+func Parse(s string) UUID {
+	if len(s) == 36+9 {
+		if strings.ToLower(s[:9]) != "urn:uuid:" {
+			return nil
+		}
+		s = s[9:]
+	} else if len(s) != 36 {
+		return nil
+	}
+	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+		return nil
+	}
+	var uuid [16]byte
+	for i, x := range [16]int{
+		0, 2, 4, 6,
+		9, 11,
+		14, 16,
+		19, 21,
+		24, 26, 28, 30, 32, 34} {
+		if v, ok := xtob(s[x:]); !ok {
+			return nil
+		} else {
+			uuid[i] = v
+		}
+	}
+	return uuid[:]
+}
+
+// Equal returns true if uuid1 and uuid2 are equal.
+func Equal(uuid1, uuid2 UUID) bool {
+	return bytes.Equal(uuid1, uuid2)
+}
+
+// Array returns an array representation of uuid that can be used as a map key.
+// Array panics if uuid is not valid.
+func (uuid UUID) Array() Array {
+	if len(uuid) != 16 {
+		panic("invalid uuid")
+	}
+	var a Array
+	copy(a[:], uuid)
+	return a
+}
+
+// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,
+// or "" if uuid is invalid.
+func (uuid UUID) String() string {
+	if len(uuid) != 16 {
+		return ""
+	}
+	var buf [36]byte
+	encodeHex(buf[:], uuid)
+	return string(buf[:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+	if len(uuid) != 16 {
+		return ""
+	}
+	var buf [36 + 9]byte
+	copy(buf[:], "urn:uuid:")
+	encodeHex(buf[9:], uuid)
+	return string(buf[:])
+}
+
+func encodeHex(dst []byte, uuid UUID) {
+	hex.Encode(dst[:], uuid[:4])
+	dst[8] = '-'
+	hex.Encode(dst[9:13], uuid[4:6])
+	dst[13] = '-'
+	hex.Encode(dst[14:18], uuid[6:8])
+	dst[18] = '-'
+	hex.Encode(dst[19:23], uuid[8:10])
+	dst[23] = '-'
+	hex.Encode(dst[24:], uuid[10:])
+}
+
+// Variant returns the variant encoded in uuid. It returns Invalid if
+// uuid is invalid.
+func (uuid UUID) Variant() Variant {
+	if len(uuid) != 16 {
+		return Invalid
+	}
+	switch {
+	case (uuid[8] & 0xc0) == 0x80:
+		return RFC4122
+	case (uuid[8] & 0xe0) == 0xc0:
+		return Microsoft
+	case (uuid[8] & 0xe0) == 0xe0:
+		return Future
+	default:
+		return Reserved
+	}
+}
+
+// Version returns the version of uuid. It returns false if uuid is not
+// valid.
+func (uuid UUID) Version() (Version, bool) {
+	if len(uuid) != 16 {
+		return 0, false
+	}
+	return Version(uuid[6] >> 4), true
+}
+
+func (v Version) String() string {
+	if v > 15 {
+		return fmt.Sprintf("BAD_VERSION_%d", v)
+	}
+	return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+	switch v {
+	case RFC4122:
+		return "RFC4122"
+	case Reserved:
+		return "Reserved"
+	case Microsoft:
+		return "Microsoft"
+	case Future:
+		return "Future"
+	case Invalid:
+		return "Invalid"
+	}
+	return fmt.Sprintf("BadVariant%d", int(v))
+}
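+
+// Illustrative sketch (not part of the upstream file): Parse accepts both the
+// plain and the urn:uuid forms, and the accessors above degrade gracefully on
+// invalid input:
+//
+//	u := Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+//	u.Variant()          // RFC4122
+//	v, ok := u.Version() // 1, true
+//	Parse("not-a-uuid")  // nil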
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+	if r == nil {
+		rander = rand.Reader
+		return
+	}
+	rander = r
+}
diff --git a/vendor/github.com/pborman/uuid/version1.go b/vendor/github.com/pborman/uuid/version1.go
new file mode 100644
index 00000000..0127eacf
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/version1.go
@@ -0,0 +1,41 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set NewUUID returns nil. If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns nil.
+func NewUUID() UUID {
+	if nodeID == nil {
+		SetNodeInterface("")
+	}
+
+	now, seq, err := GetTime()
+	if err != nil {
+		return nil
+	}
+
+	uuid := make([]byte, 16)
+
+	time_low := uint32(now & 0xffffffff)
+	time_mid := uint16((now >> 32) & 0xffff)
+	time_hi := uint16((now >> 48) & 0x0fff)
+	time_hi |= 0x1000 // Version 1
+
+	binary.BigEndian.PutUint32(uuid[0:], time_low)
+	binary.BigEndian.PutUint16(uuid[4:], time_mid)
+	binary.BigEndian.PutUint16(uuid[6:], time_hi)
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+	copy(uuid[10:], nodeID)
+
+	return uuid
+}
diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go
new file mode 100644
index 00000000..b3d4a368
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/version4.go
@@ -0,0 +1,25 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+// NewRandom returns a Random (Version 4) UUID or panics.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+//	Randomly generated UUIDs have 122 random bits. One's annual risk of being
+//	hit by a meteorite is estimated to be one chance in 17 billion, that
+//	means the probability is about 0.00000000006 (6 × 10−11),
+//	equivalent to the odds of creating a few tens of trillions of UUIDs in a
+//	year and having one duplicate.
+func NewRandom() UUID {
+	uuid := make([]byte, 16)
+	randomBits([]byte(uuid))
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid
+}
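A short usage sketch tying the constructors above together. It is illustrative only, not part of the vendored files, and assumes the import path github.com/pborman/uuid:

	package main

	import (
		"fmt"

		"github.com/pborman/uuid"
	)

	func main() {
		id := uuid.NewRandom() // Version 4, RFC 4122 variant
		if v, ok := id.Version(); ok {
			fmt.Println(v, id.String()) // VERSION_4 xxxxxxxx-xxxx-4xxx-...
		}
		if t := uuid.NewUUID(); t != nil { // Version 1, time based
			fmt.Println(t.URN())
		}
	}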
diff --git a/vendor/github.com/peterhellberg/giphy/client.go b/vendor/github.com/peterhellberg/giphy/client.go
new file mode 100644
index 00000000..09c005c2
--- /dev/null
+++ b/vendor/github.com/peterhellberg/giphy/client.go
@@ -0,0 +1,117 @@
+package giphy
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+// DefaultClient is the default Giphy API client.
+var DefaultClient = NewClient()
+
+// PublicBetaKey is the public beta key for the Giphy API.
+var PublicBetaKey = "dc6zaTOxFJmzC"
+
+// A Client communicates with the Giphy API.
+type Client struct {
+	// APIKey is the key used for requests to the Giphy API
+	APIKey string
+
+	// Limit is the limit used for requests to the Giphy API
+	Limit int
+
+	// Rating is the rating used for requests to the Giphy API
+	Rating string
+
+	// BaseURL is the base url for Giphy API.
+	BaseURL *url.URL
+
+	// BasePath is the base path for the gifs endpoints
+	BasePath string
+
+	// User agent used for HTTP requests to Giphy API.
+	UserAgent string
+
+	// HTTP client used to communicate with the Giphy API.
+	httpClient *http.Client
+}
+
+// NewClient returns a new Giphy API client.
+// If no *http.Client is provided then a copy of http.DefaultClient is used.
+func NewClient(httpClients ...*http.Client) *Client {
+	var httpClient *http.Client
+
+	if len(httpClients) > 0 && httpClients[0] != nil {
+		httpClient = httpClients[0]
+	} else {
+		cloned := *http.DefaultClient
+		httpClient = &cloned
+	}
+
+	c := &Client{
+		APIKey: Env("GIPHY_API_KEY", PublicBetaKey),
+		Rating: Env("GIPHY_RATING", "g"),
+		Limit:  EnvInt("GIPHY_LIMIT", 10),
+		BaseURL: &url.URL{
+			Scheme: Env("GIPHY_BASE_URL_SCHEME", "https"),
+			Host:   Env("GIPHY_BASE_URL_HOST", "api.giphy.com"),
+		},
+		BasePath:   Env("GIPHY_BASE_PATH", "/v1"),
+		UserAgent:  Env("GIPHY_USER_AGENT", "giphy.go"),
+		httpClient: httpClient,
+	}
+
+	return c
+}
+
+// NewRequest creates an API request.
+func (c *Client) NewRequest(s string) (*http.Request, error) {
+	rel, err := url.Parse(c.BasePath + s)
+	if err != nil {
+		return nil, err
+	}
+
+	q := rel.Query()
+	q.Set("api_key", c.APIKey)
+	q.Set("rating", c.Rating)
+	rel.RawQuery = q.Encode()
+
+	u := c.BaseURL.ResolveReference(rel)
+
+	if EnvBool("GIPHY_VERBOSE", false) {
+		fmt.Println("giphy: GET", u.String())
+	}
+
+	req, err := http.NewRequest("GET", u.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Add("User-Agent", c.UserAgent)
+	return req, nil
+}
+
+// Do sends an API request and returns the API response. The API response is
+// decoded and stored in the value pointed to by v, or returned as an error if
+// an API error has occurred.
+func (c *Client) Do(req *http.Request, v interface{}) (*http.Response, error) {
+	// Make sure to close the connection after replying to this request
+	req.Close = true
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return resp, err
+	}
+	defer resp.Body.Close()
+
+	if v != nil {
+		err = json.NewDecoder(resp.Body).Decode(v)
+	}
+
+	if err != nil {
+		return nil, fmt.Errorf("error reading response from %s %s: %s", req.Method, req.URL.RequestURI(), err)
+	}
+
+	return resp, nil
+}
diff --git a/vendor/github.com/peterhellberg/giphy/cmd/giphy/main.go b/vendor/github.com/peterhellberg/giphy/cmd/giphy/main.go
new file mode 100644
index 00000000..b621f092
--- /dev/null
+++ b/vendor/github.com/peterhellberg/giphy/cmd/giphy/main.go
@@ -0,0 +1,140 @@
+/*
+
+A command line client for the Giphy API
+
+Installation
+
+Just go get the command:
+
+	go get -u github.com/peterhellberg/giphy/cmd/giphy
+
+Configuration
+
+The command line client can be used straight out of the box, but
+there are also a few environment variables that you can use in order
+to override the default configuration.
+
+	Environment variable  | Default value
+	----------------------|--------------
+	GIPHY_API_KEY         | dc6zaTOxFJmzC
+	GIPHY_RATING          | g
+	GIPHY_LIMIT           | 10
+	GIPHY_BASE_URL_SCHEME | https
+	GIPHY_BASE_URL_HOST   | api.giphy.com
+	GIPHY_BASE_PATH       | /v1
+	GIPHY_USER_AGENT      | giphy.go
+
+Usage
+
+The command line client consists of a few sub commands.
+ + Commands: + search, s [args] + gif, id [args] + random, rand, r [args] + translate, trans, t [args] + trending, trend, tr [args] + +*/ +package main + +import ( + "fmt" + "os" + "strings" + + "github.com/peterhellberg/giphy" +) + +func main() { + g := giphy.DefaultClient + + if len(os.Args) < 2 { + fmt.Println(strings.Join([]string{ + "Commands:", + "search, s [args]", + "gif, id [args]", + "random, rand, r [args]", + "translate, trans, t [args]", + "trending, trend, tr [args]", + }, "\n\t")) + + return + } + + args := os.Args[1:] + + switch args[0] { + default: + search(g, args) + case "search", "s": + search(g, args[1:]) + case "gif", "id": + gif(g, args[1:]) + case "random", "rand", "r": + random(g, args[1:]) + case "translate", "trans", "t": + translate(g, args[1:]) + case "trending", "trend", "tr": + trending(g, args[1:]) + } +} + +func search(c *giphy.Client, args []string) { + res, err := c.Search(args) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + for _, d := range res.Data { + fmt.Println(d.Images.Original.URL) + } +} + +func gif(c *giphy.Client, args []string) { + if len(args) == 0 { + fmt.Println("missing Giphy id") + os.Exit(1) + } + + res, err := c.GIF(args[0]) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + fmt.Println(res.Data.Images.Original.URL) +} + +func random(c *giphy.Client, args []string) { + res, err := c.Random(args) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + fmt.Println(res.Data.ImageOriginalURL) +} + +func translate(c *giphy.Client, args []string) { + res, err := c.Translate(args) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + fmt.Println(res.Data.Images.Original.URL) +} + +func trending(c *giphy.Client, args []string) { + res, err := c.Trending(args) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + for _, d := range res.Data { + fmt.Println(d.Images.Original.URL) + } +} diff --git a/vendor/github.com/peterhellberg/giphy/env.go b/vendor/github.com/peterhellberg/giphy/env.go new file mode 100644 index 00000000..b41944ce --- /dev/null +++ b/vendor/github.com/peterhellberg/giphy/env.go @@ -0,0 +1,34 @@ +package giphy + +import ( + "os" + "strconv" +) + +// Env returns a string from the ENV, or fallback variable +func Env(key, fallback string) string { + v := os.Getenv(key) + if v != "" { + return v + } + + return fallback +} + +// EnvBool returns a bool from the ENV, or fallback variable +func EnvBool(key string, fallback bool) bool { + if b, err := strconv.ParseBool(os.Getenv(key)); err == nil { + return b + } + + return fallback +} + +// EnvInt returns an int from the ENV, or fallback variable +func EnvInt(key string, fallback int) int { + if i, err := strconv.Atoi(os.Getenv(key)); err == nil { + return i + } + + return fallback +} diff --git a/vendor/github.com/peterhellberg/giphy/errors.go b/vendor/github.com/peterhellberg/giphy/errors.go new file mode 100644 index 00000000..a98bc2f8 --- /dev/null +++ b/vendor/github.com/peterhellberg/giphy/errors.go @@ -0,0 +1,17 @@ +package giphy + +import "errors" + +var ( + // ErrNoImageFound is the error returned when no image was found + ErrNoImageFound = errors.New("no image found") + + // ErrUnknown is used for unknown errors from the Giphy API + ErrUnknown = errors.New("unknown error") + + // ErrNoTrendingImagesFound is returned when no trending images were found + ErrNoTrendingImagesFound = errors.New("no trending images found") + + // ErrNoRawData is returned if there was no data property in response + ErrNoRawData = errors.New("no raw data") +) diff --git 
diff --git a/vendor/github.com/peterhellberg/giphy/gif.go b/vendor/github.com/peterhellberg/giphy/gif.go
new file mode 100644
index 00000000..8c317c97
--- /dev/null
+++ b/vendor/github.com/peterhellberg/giphy/gif.go
@@ -0,0 +1,44 @@
+package giphy
+
+import (
+    "encoding/json"
+    "fmt"
+    "strings"
+)
+
+// GIF returns an ID response from the Giphy API
+func (c *Client) GIF(id string) (GIF, error) {
+    if strings.ContainsAny(id, "/&?") {
+        return GIF{}, fmt.Errorf("invalid giphy id: `%v`", id)
+    }
+
+    req, err := c.NewRequest("/gifs/" + id)
+    if err != nil {
+        return GIF{}, err
+    }
+
+    var gif GIF
+    if _, err = c.Do(req, &gif); err != nil {
+        return GIF{}, err
+    }
+
+    if gif.RawData == nil || gif.RawData[0] == '[' {
+        return GIF{}, ErrNoImageFound
+    }
+
+    // Check if the first character in RawData is a {
+    if gif.RawData[0] == '{' {
+        var d Data
+
+        err = json.Unmarshal(gif.RawData, &d)
+        if err != nil {
+            return GIF{}, err
+        }
+
+        gif.Data = d
+
+        return gif, nil
+    }
+
+    return GIF{}, ErrUnknown
+}
diff --git a/vendor/github.com/peterhellberg/giphy/random.go b/vendor/github.com/peterhellberg/giphy/random.go
new file mode 100644
index 00000000..1374ed24
--- /dev/null
+++ b/vendor/github.com/peterhellberg/giphy/random.go
@@ -0,0 +1,37 @@
+package giphy
+
+import (
+    "encoding/json"
+    "strings"
+)
+
+// Random returns a random response from the Giphy API
+func (c *Client) Random(args []string) (Random, error) {
+    argsStr := strings.Join(args, " ")
+
+    req, err := c.NewRequest("/gifs/random?tag=" + argsStr)
+    if err != nil {
+        return Random{}, err
+    }
+
+    var random Random
+    if _, err = c.Do(req, &random); err != nil {
+        return Random{}, err
+    }
+
+    // Check if the first character in RawData is a [
+    if random.RawData == nil || random.RawData[0] == '[' {
+        return Random{}, ErrNoImageFound
+    }
+
+    var d RandomData
+
+    err = json.Unmarshal(random.RawData, &d)
+    if err != nil {
+        return Random{}, err
+    }
+
+    random.Data = d
+
+    return random, nil
+}
diff --git a/vendor/github.com/peterhellberg/giphy/search.go b/vendor/github.com/peterhellberg/giphy/search.go
new file mode 100644
index 00000000..ca5af117
--- /dev/null
+++ b/vendor/github.com/peterhellberg/giphy/search.go
@@ -0,0 +1,24 @@
+package giphy
+
+import (
+    "fmt"
+    "strings"
+)
+
+// Search returns a search response from the Giphy API
+func (c *Client) Search(args []string) (Search, error) {
+    argsStr := strings.Join(args, " ")
+
+    path := fmt.Sprintf("/gifs/search?limit=%v&q=%s", c.Limit, argsStr)
+    req, err := c.NewRequest(path)
+    if err != nil {
+        return Search{}, err
+    }
+
+    var search Search
+    if _, err = c.Do(req, &search); err != nil {
+        return Search{}, err
+    }
+
+    return search, nil
+}
diff --git a/vendor/github.com/peterhellberg/giphy/translate.go b/vendor/github.com/peterhellberg/giphy/translate.go
new file mode 100644
index 00000000..e3454b28
--- /dev/null
+++ b/vendor/github.com/peterhellberg/giphy/translate.go
@@ -0,0 +1,37 @@
+package giphy
+
+import (
+    "encoding/json"
+    "strings"
+)
+
+// Translate returns a translate response from the Giphy API
+func (c *Client) Translate(args []string) (Translate, error) {
+    argsStr := strings.Join(args, " ")
+
+    req, err := c.NewRequest("/gifs/translate?s=" + argsStr)
+    if err != nil {
+        return Translate{}, err
+    }
+
+    var translate Translate
+    if _, err = c.Do(req, &translate); err != nil {
+        return Translate{}, err
+    }
+
+    if len(translate.RawData) == 0 {
+        return Translate{}, ErrNoRawData
+    }
+
+    // Check if the first character in RawData is a [
+    if translate.RawData[0] == '[' {
+        return Translate{}, ErrNoImageFound
+    }
+
+    err = json.Unmarshal(translate.RawData, &translate.Data)
+    if err != nil {
+        return Translate{}, err
+    }
+
+    return translate, nil
+}
diff --git a/vendor/github.com/peterhellberg/giphy/trending.go b/vendor/github.com/peterhellberg/giphy/trending.go
new file mode 100644
index 00000000..671e0d51
--- /dev/null
+++ b/vendor/github.com/peterhellberg/giphy/trending.go
@@ -0,0 +1,23 @@
+package giphy
+
+import "fmt"
+
+// Trending returns a trending response from the Giphy API
+func (c *Client) Trending(args ...[]string) (Trending, error) {
+    path := fmt.Sprintf("/gifs/trending?limit=%v", c.Limit)
+    req, err := c.NewRequest(path)
+    if err != nil {
+        return Trending{}, err
+    }
+
+    var res Trending
+    if _, err = c.Do(req, &res); err != nil {
+        return res, err
+    }
+
+    if len(res.Data) == 0 {
+        return res, ErrNoTrendingImagesFound
+    }
+
+    return res, nil
+}
diff --git a/vendor/github.com/peterhellberg/giphy/types.go b/vendor/github.com/peterhellberg/giphy/types.go
new file mode 100644
index 00000000..e7a7ebfc
--- /dev/null
+++ b/vendor/github.com/peterhellberg/giphy/types.go
@@ -0,0 +1,116 @@
+package giphy
+
+import "encoding/json"
+
+// Search represents a search response from the Giphy API
+type Search struct {
+    Data       []Data     `json:"data"`
+    Meta       Meta       `json:"meta"`
+    Pagination Pagination `json:"pagination"`
+}
+
+// GIF represents an ID response from the Giphy API
+type GIF struct {
+    Data    Data
+    RawData json.RawMessage `json:"data"`
+    Meta    Meta            `json:"meta"`
+}
+
+// Random represents a random response from the Giphy API
+type Random struct {
+    Data    RandomData
+    RawData json.RawMessage `json:"data"`
+    Meta    Meta            `json:"meta"`
+}
+
+// Translate represents a translate response from the Giphy API
+type Translate struct {
+    Data
+    RawData json.RawMessage `json:"data"`
+    Meta    Meta            `json:"meta"`
+}
+
+// Trending represents a trending response from the Giphy API
+type Trending struct {
+    Data       []Data     `json:"data"`
+    Meta       Meta       `json:"meta"`
+    Pagination Pagination `json:"pagination"`
+}
+
+// Data contains all the fields in a data response from the Giphy API
+type Data struct {
+    Type             string `json:"type"`
+    ID               string `json:"id"`
+    URL              string `json:"url"`
+    BitlyGifURL      string `json:"bitly_gif_url"`
+    BitlyURL         string `json:"bitly_url"`
+    EmbedURL         string `json:"embed_url"`
+    Username         string `json:"username"`
+    Source           string `json:"source"`
+    Rating           string `json:"rating"`
+    Caption          string `json:"caption"`
+    ContentURL       string `json:"content_url"`
+    ImportDatetime   string `json:"import_datetime"`
+    TrendingDatetime string `json:"trending_datetime"`
+    Images           Images `json:"images"`
+}
+
+// RandomData represents the data section in a random response from the Giphy API
+type RandomData struct {
+    Type                         string `json:"type"`
+    ID                           string `json:"id"`
+    URL                          string `json:"url"`
+    ImageOriginalURL             string `json:"image_original_url"`
+    ImageURL                     string `json:"image_url"`
+    ImageMp4URL                  string `json:"image_mp4_url"`
+    ImageFrames                  string `json:"image_frames"`
+    ImageWidth                   string `json:"image_width"`
+    ImageHeight                  string `json:"image_height"`
+    FixedHeightDownsampledURL    string `json:"fixed_height_downsampled_url"`
+    FixedHeightDownsampledWidth  string `json:"fixed_height_downsampled_width"`
+    FixedHeightDownsampledHeight string `json:"fixed_height_downsampled_height"`
+    FixedWidthDownsampledURL     string `json:"fixed_width_downsampled_url"`
+    FixedWidthDownsampledWidth   string `json:"fixed_width_downsampled_width"`
+    FixedWidthDownsampledHeight  string `json:"fixed_width_downsampled_height"`
+    Rating                       string
`json:"rating"` + Username string `json:"username"` + Caption string `json:"caption"` + Tags []string `json:"tags"` +} + +// Images represents all the different types of images +type Images struct { + FixedHeight Image `json:"fixed_height"` + FixedHeightStill Image `json:"fixed_height_still"` + FixedHeightDownsampled Image `json:"fixed_height_downsampled"` + FixedWidth Image `json:"fixed_width"` + FixedWidthStill Image `json:"fixed_width_still"` + FixedWidthDownsampled Image `json:"fixed_width_downsampled"` + Downsized Image `json:"downsized"` + DownsizedStill Image `json:"downsized_still"` + Original Image `json:"original"` + OriginalStill Image `json:"original_still"` +} + +// Image represents an image +type Image struct { + URL string `json:"url"` + Width string `json:"width"` + Height string `json:"height"` + Size string `json:"size,omitempty"` + Frames string `json:"frames,omitempty"` + Mp4 string `json:"mp4,omitempty"` +} + +// Pagination represents the pagination section in a Giphy API response +type Pagination struct { + TotalCount int `json:"total_count"` + Count int `json:"count"` + Offset int `json:"offset"` +} + +// Meta represents the meta section in a Giphy API response +type Meta struct { + Status int `json:"status"` + Msg string `json:"msg"` +} diff --git a/vendor/github.com/sorcix/irc/LICENSE b/vendor/github.com/sorcix/irc/LICENSE new file mode 100644 index 00000000..10cecc4c --- /dev/null +++ b/vendor/github.com/sorcix/irc/LICENSE @@ -0,0 +1,22 @@ +Copyright 2014 Vic Demuzere + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/sorcix/irc/constants.go b/vendor/github.com/sorcix/irc/constants.go new file mode 100644 index 00000000..d4812ba3 --- /dev/null +++ b/vendor/github.com/sorcix/irc/constants.go @@ -0,0 +1,298 @@ +// Copyright 2014 Vic Demuzere +// +// Use of this source code is governed by the MIT license. + +package irc + +// Various prefixes extracted from RFC1459. +const ( + Channel = '#' // Normal channel + Distributed = '&' // Distributed channel + + Owner = '~' // Channel owner +q (non-standard) + Admin = '&' // Channel admin +a (non-standard) + Operator = '@' // Channel operator +o + HalfOperator = '%' // Channel half operator +h (non-standard) + Voice = '+' // User has voice +v +) + +// User modes as defined by RFC1459 section 4.2.3.2. 
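+// For example (illustrative), a client marking itself invisible sends:
+//
+//    MODE mynick +i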
+const ( + UserModeInvisible = 'i' // User is invisible + UserModeServerNotices = 's' // User wants to receive server notices + UserModeWallops = 'w' // User wants to receive Wallops + UserModeOperator = 'o' // Server operator +) + +// Channel modes as defined by RFC1459 section 4.2.3.1 +const ( + ModeOperator = 'o' // Operator privileges + ModeVoice = 'v' // Ability to speak on a moderated channel + ModePrivate = 'p' // Private channel + ModeSecret = 's' // Secret channel + ModeInviteOnly = 'i' // Users can't join without invite + ModeTopic = 't' // Topic can only be set by an operator + ModeModerated = 'm' // Only voiced users and operators can talk + ModeLimit = 'l' // User limit + ModeKey = 'k' // Channel password + + ModeOwner = 'q' // Owner privileges (non-standard) + ModeAdmin = 'a' // Admin privileges (non-standard) + ModeHalfOperator = 'h' // Half-operator privileges (non-standard) +) + +// IRC commands extracted from RFC2812 section 3 and RFC2813 section 4. +const ( + PASS = "PASS" + NICK = "NICK" + USER = "USER" + OPER = "OPER" + MODE = "MODE" + SERVICE = "SERVICE" + QUIT = "QUIT" + SQUIT = "SQUIT" + JOIN = "JOIN" + PART = "PART" + TOPIC = "TOPIC" + NAMES = "NAMES" + LIST = "LIST" + INVITE = "INVITE" + KICK = "KICK" + PRIVMSG = "PRIVMSG" + NOTICE = "NOTICE" + MOTD = "MOTD" + LUSERS = "LUSERS" + VERSION = "VERSION" + STATS = "STATS" + LINKS = "LINKS" + TIME = "TIME" + CONNECT = "CONNECT" + TRACE = "TRACE" + ADMIN = "ADMIN" + INFO = "INFO" + SERVLIST = "SERVLIST" + SQUERY = "SQUERY" + WHO = "WHO" + WHOIS = "WHOIS" + WHOWAS = "WHOWAS" + KILL = "KILL" + PING = "PING" + PONG = "PONG" + ERROR = "ERROR" + AWAY = "AWAY" + REHASH = "REHASH" + DIE = "DIE" + RESTART = "RESTART" + SUMMON = "SUMMON" + USERS = "USERS" + WALLOPS = "WALLOPS" + USERHOST = "USERHOST" + ISON = "ISON" + SERVER = "SERVER" + NJOIN = "NJOIN" +) + +// Numeric IRC replies extracted from RFC2812 section 5. 
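+// A numeric reply is an ordinary message whose command is the three-digit
+// code, for example (illustrative):
+//
+//    :irc.example.net 001 mynick :Welcome to the Internet Relay Network mynick!user@host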
+const ( + RPL_WELCOME = "001" + RPL_YOURHOST = "002" + RPL_CREATED = "003" + RPL_MYINFO = "004" + RPL_BOUNCE = "005" + RPL_ISUPPORT = "005" + RPL_USERHOST = "302" + RPL_ISON = "303" + RPL_AWAY = "301" + RPL_UNAWAY = "305" + RPL_NOWAWAY = "306" + RPL_WHOISUSER = "311" + RPL_WHOISSERVER = "312" + RPL_WHOISOPERATOR = "313" + RPL_WHOISIDLE = "317" + RPL_ENDOFWHOIS = "318" + RPL_WHOISCHANNELS = "319" + RPL_WHOWASUSER = "314" + RPL_ENDOFWHOWAS = "369" + RPL_LISTSTART = "321" + RPL_LIST = "322" + RPL_LISTEND = "323" + RPL_UNIQOPIS = "325" + RPL_CHANNELMODEIS = "324" + RPL_NOTOPIC = "331" + RPL_TOPIC = "332" + RPL_INVITING = "341" + RPL_SUMMONING = "342" + RPL_INVITELIST = "346" + RPL_ENDOFINVITELIST = "347" + RPL_EXCEPTLIST = "348" + RPL_ENDOFEXCEPTLIST = "349" + RPL_VERSION = "351" + RPL_WHOREPLY = "352" + RPL_ENDOFWHO = "315" + RPL_NAMREPLY = "353" + RPL_ENDOFNAMES = "366" + RPL_LINKS = "364" + RPL_ENDOFLINKS = "365" + RPL_BANLIST = "367" + RPL_ENDOFBANLIST = "368" + RPL_INFO = "371" + RPL_ENDOFINFO = "374" + RPL_MOTDSTART = "375" + RPL_MOTD = "372" + RPL_ENDOFMOTD = "376" + RPL_YOUREOPER = "381" + RPL_REHASHING = "382" + RPL_YOURESERVICE = "383" + RPL_TIME = "391" + RPL_USERSSTART = "392" + RPL_USERS = "393" + RPL_ENDOFUSERS = "394" + RPL_NOUSERS = "395" + RPL_TRACELINK = "200" + RPL_TRACECONNECTING = "201" + RPL_TRACEHANDSHAKE = "202" + RPL_TRACEUNKNOWN = "203" + RPL_TRACEOPERATOR = "204" + RPL_TRACEUSER = "205" + RPL_TRACESERVER = "206" + RPL_TRACESERVICE = "207" + RPL_TRACENEWTYPE = "208" + RPL_TRACECLASS = "209" + RPL_TRACERECONNECT = "210" + RPL_TRACELOG = "261" + RPL_TRACEEND = "262" + RPL_STATSLINKINFO = "211" + RPL_STATSCOMMANDS = "212" + RPL_ENDOFSTATS = "219" + RPL_STATSUPTIME = "242" + RPL_STATSOLINE = "243" + RPL_UMODEIS = "221" + RPL_SERVLIST = "234" + RPL_SERVLISTEND = "235" + RPL_LUSERCLIENT = "251" + RPL_LUSEROP = "252" + RPL_LUSERUNKNOWN = "253" + RPL_LUSERCHANNELS = "254" + RPL_LUSERME = "255" + RPL_ADMINME = "256" + RPL_ADMINLOC1 = "257" + RPL_ADMINLOC2 = "258" + RPL_ADMINEMAIL = "259" + RPL_TRYAGAIN = "263" + ERR_NOSUCHNICK = "401" + ERR_NOSUCHSERVER = "402" + ERR_NOSUCHCHANNEL = "403" + ERR_CANNOTSENDTOCHAN = "404" + ERR_TOOMANYCHANNELS = "405" + ERR_WASNOSUCHNICK = "406" + ERR_TOOMANYTARGETS = "407" + ERR_NOSUCHSERVICE = "408" + ERR_NOORIGIN = "409" + ERR_NORECIPIENT = "411" + ERR_NOTEXTTOSEND = "412" + ERR_NOTOPLEVEL = "413" + ERR_WILDTOPLEVEL = "414" + ERR_BADMASK = "415" + ERR_UNKNOWNCOMMAND = "421" + ERR_NOMOTD = "422" + ERR_NOADMININFO = "423" + ERR_FILEERROR = "424" + ERR_NONICKNAMEGIVEN = "431" + ERR_ERRONEUSNICKNAME = "432" + ERR_NICKNAMEINUSE = "433" + ERR_NICKCOLLISION = "436" + ERR_UNAVAILRESOURCE = "437" + ERR_USERNOTINCHANNEL = "441" + ERR_NOTONCHANNEL = "442" + ERR_USERONCHANNEL = "443" + ERR_NOLOGIN = "444" + ERR_SUMMONDISABLED = "445" + ERR_USERSDISABLED = "446" + ERR_NOTREGISTERED = "451" + ERR_NEEDMOREPARAMS = "461" + ERR_ALREADYREGISTRED = "462" + ERR_NOPERMFORHOST = "463" + ERR_PASSWDMISMATCH = "464" + ERR_YOUREBANNEDCREEP = "465" + ERR_YOUWILLBEBANNED = "466" + ERR_KEYSET = "467" + ERR_CHANNELISFULL = "471" + ERR_UNKNOWNMODE = "472" + ERR_INVITEONLYCHAN = "473" + ERR_BANNEDFROMCHAN = "474" + ERR_BADCHANNELKEY = "475" + ERR_BADCHANMASK = "476" + ERR_NOCHANMODES = "477" + ERR_BANLISTFULL = "478" + ERR_NOPRIVILEGES = "481" + ERR_CHANOPRIVSNEEDED = "482" + ERR_CANTKILLSERVER = "483" + ERR_RESTRICTED = "484" + ERR_UNIQOPPRIVSNEEDED = "485" + ERR_NOOPERHOST = "491" + ERR_UMODEUNKNOWNFLAG = "501" + ERR_USERSDONTMATCH = "502" +) + +// IRC commands extracted 
from the IRCv3 spec at http://www.ircv3.org/.
+const (
+    CAP       = "CAP"
+    CAP_LS    = "LS"    // Subcommand (param)
+    CAP_LIST  = "LIST"  // Subcommand (param)
+    CAP_REQ   = "REQ"   // Subcommand (param)
+    CAP_ACK   = "ACK"   // Subcommand (param)
+    CAP_NAK   = "NAK"   // Subcommand (param)
+    CAP_CLEAR = "CLEAR" // Subcommand (param)
+    CAP_END   = "END"   // Subcommand (param)
+
+    AUTHENTICATE = "AUTHENTICATE"
+)
+
+// Numeric IRC replies extracted from the IRCv3 spec.
+const (
+    RPL_LOGGEDIN    = "900"
+    RPL_LOGGEDOUT   = "901"
+    RPL_NICKLOCKED  = "902"
+    RPL_SASLSUCCESS = "903"
+    ERR_SASLFAIL    = "904"
+    ERR_SASLTOOLONG = "905"
+    ERR_SASLABORTED = "906"
+    ERR_SASLALREADY = "907"
+    RPL_SASLMECHS   = "908"
+)
+
+// RFC2812, section 5.3
+const (
+    RPL_STATSCLINE    = "213"
+    RPL_STATSNLINE    = "214"
+    RPL_STATSILINE    = "215"
+    RPL_STATSKLINE    = "216"
+    RPL_STATSQLINE    = "217"
+    RPL_STATSYLINE    = "218"
+    RPL_SERVICEINFO   = "231"
+    RPL_ENDOFSERVICES = "232"
+    RPL_SERVICE       = "233"
+    RPL_STATSVLINE    = "240"
+    RPL_STATSLLINE    = "241"
+    RPL_STATSHLINE    = "244"
+    RPL_STATSSLINE    = "245"
+    RPL_STATSPING     = "246"
+    RPL_STATSBLINE    = "247"
+    RPL_STATSDLINE    = "250"
+    RPL_NONE          = "300"
+    RPL_WHOISCHANOP   = "316"
+    RPL_KILLDONE      = "361"
+    RPL_CLOSING       = "362"
+    RPL_CLOSEEND      = "363"
+    RPL_INFOSTART     = "373"
+    RPL_MYPORTIS      = "384"
+    ERR_NOSERVICEHOST = "492"
+)
+
+// Other constants
+const (
+    ERR_TOOMANYMATCHES = "416" // Used on IRCNet
+    RPL_TOPICWHOTIME   = "333" // From ircu, in use on Freenode
+    RPL_LOCALUSERS     = "265" // From aircd, Hybrid, Bahamut, in use on Freenode
+    RPL_GLOBALUSERS    = "266" // From aircd, Hybrid, Bahamut, in use on Freenode
+)
diff --git a/vendor/github.com/sorcix/irc/ctcp/ctcp.go b/vendor/github.com/sorcix/irc/ctcp/ctcp.go
new file mode 100644
index 00000000..7ead788d
--- /dev/null
+++ b/vendor/github.com/sorcix/irc/ctcp/ctcp.go
@@ -0,0 +1,144 @@
+// Copyright 2014 Vic Demuzere
+//
+// Use of this source code is governed by the MIT license.
+
+package ctcp
+
+// Sources:
+// http://www.irchelp.org/irchelp/rfc/ctcpspec.html
+// http://www.kvirc.net/doc/doc_ctcp_handling.html
+
+import (
+    "fmt"
+    "runtime"
+    "strings"
+    "time"
+)
+
+// Various constants used for formatting CTCP messages.
+const (
+    delimiter byte = 0x01 // Prefix and suffix for CTCP tagged messages.
+    space     byte = 0x20 // Token separator
+
+    empty = "" // The empty string
+
+    timeFormat    = time.RFC1123Z
+    versionFormat = "Go v%s (" + runtime.GOOS + ", " + runtime.GOARCH + ")"
+)
+
+// Tags extracted from the CTCP spec.
+const (
+    ACTION     = "ACTION"
+    PING       = "PING"
+    PONG       = "PONG"
+    VERSION    = "VERSION"
+    USERINFO   = "USERINFO"
+    CLIENTINFO = "CLIENTINFO"
+    FINGER     = "FINGER"
+    SOURCE     = "SOURCE"
+    TIME       = "TIME"
+)
+
+// Decode attempts to decode CTCP tagged data inside given message text.
+//
+// If the message text does not contain tagged data, ok will be false.
+//
+//    <tagged message> ::= <delim> <tag> [<SPACE> <message>] <delim>
+//    <delim>          ::= 0x01
+//
+func Decode(text string) (tag, message string, ok bool) {
+
+    // Fast path, return if this text does not contain a CTCP message.
+    if len(text) < 3 || text[0] != delimiter || text[len(text)-1] != delimiter {
+        return empty, empty, false
+    }
+
+    s := strings.IndexByte(text, space)
+
+    if s < 0 {
+
+        // Messages may contain only a tag.
+        return text[1 : len(text)-1], empty, true
+    }
+
+    return text[1:s], text[s+1 : len(text)-1], true
+}
+
+// Encode returns the IRC message text for CTCP tagged data.
+//
+//    <tagged message> ::= <delim> <tag> [<SPACE> <message>] <delim>
+//    <delim>          ::= 0x01
+//
+func Encode(tag, message string) (text string) {
+
+    switch {
+
+    // We can't build a valid CTCP tagged message without at least a tag.
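+    // (Illustrative: a valid tagged message looks like "\x01ACTION waves\x01",
+    // so with no tag there is nothing to emit.)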
+    case len(tag) <= 0:
+        return empty
+
+    // Tagged data with a message
+    case len(message) > 0:
+        return string(delimiter) + tag + string(space) + message + string(delimiter)
+
+    // Tagged data without a message
+    default:
+        return string(delimiter) + tag + string(delimiter)
+
+    }
+}
+
+// Action is a shortcut for Encode(ctcp.ACTION, message).
+func Action(message string) string {
+    return Encode(ACTION, message)
+}
+
+// Ping is a shortcut for Encode(ctcp.PING, message).
+func Ping(message string) string {
+    return Encode(PING, message)
+}
+
+// Pong is a shortcut for Encode(ctcp.PONG, message).
+func Pong(message string) string {
+    return Encode(PONG, message)
+}
+
+// Version is a shortcut for Encode(ctcp.VERSION, message).
+func Version(message string) string {
+    return Encode(VERSION, message)
+}
+
+// VersionReply is a shortcut for Encode(ctcp.VERSION, msg), where msg
+// describes the Go runtime version, OS and architecture.
+func VersionReply() string {
+    return Encode(VERSION, fmt.Sprintf(versionFormat, runtime.Version()))
+}
+
+// UserInfo is a shortcut for Encode(ctcp.USERINFO, message).
+func UserInfo(message string) string {
+    return Encode(USERINFO, message)
+}
+
+// ClientInfo is a shortcut for Encode(ctcp.CLIENTINFO, message).
+func ClientInfo(message string) string {
+    return Encode(CLIENTINFO, message)
+}
+
+// Finger is a shortcut for Encode(ctcp.FINGER, message).
+func Finger(message string) string {
+    return Encode(FINGER, message)
+}
+
+// Source is a shortcut for Encode(ctcp.SOURCE, message).
+func Source(message string) string {
+    return Encode(SOURCE, message)
+}
+
+// Time is a shortcut for Encode(ctcp.TIME, message).
+func Time(message string) string {
+    return Encode(TIME, message)
+}
+
+// TimeReply is a shortcut for Encode(ctcp.TIME, t), where t is the current
+// time in RFC1123Z format.
+func TimeReply() string {
+    return Encode(TIME, time.Now().Format(timeFormat))
+}
diff --git a/vendor/github.com/sorcix/irc/ctcp/doc.go b/vendor/github.com/sorcix/irc/ctcp/doc.go
new file mode 100644
index 00000000..f0308d86
--- /dev/null
+++ b/vendor/github.com/sorcix/irc/ctcp/doc.go
@@ -0,0 +1,31 @@
+// Copyright 2014 Vic Demuzere
+//
+// Use of this source code is governed by the MIT license.
+
+// Package ctcp implements partial support for the Client-to-Client Protocol.
+//
+// CTCP defines extended messages using the standard PRIVMSG and NOTICE
+// commands in IRC. This means that any CTCP messages are embedded inside the
+// normal message text. Clients that don't support CTCP simply show
+// the encoded message to the user.
+//
+// Most IRC clients support only a subset of the protocol, and only a few
+// commands are actually used. This package aims to implement the most basic
+// CTCP messages: a single command per IRC message. Quoting is not supported.
+//
+// Example using the irc.Message type:
+//
+//    m := irc.ParseMessage(...)
+//
+//    if tag, text, ok := ctcp.Decode(m.Trailing); ok {
+//        // This is a CTCP message.
+//    } else {
+//        // This is not a CTCP message.
+//    }
+//
+// Similarly, for encoding messages:
+//
+//    m.Trailing = ctcp.Encode("ACTION", "wants a cookie!")
+//
+// Do not send a complete IRC message to Decode, it won't work.
+package ctcp
diff --git a/vendor/github.com/sorcix/irc/doc.go b/vendor/github.com/sorcix/irc/doc.go
new file mode 100644
index 00000000..0effeb8a
--- /dev/null
+++ b/vendor/github.com/sorcix/irc/doc.go
@@ -0,0 +1,36 @@
+// Copyright 2014 Vic Demuzere
+//
+// Use of this source code is governed by the MIT license.
+
+// Package irc allows your application to speak the IRC protocol.
+//
+// The Message and Prefix structs provide translation to and from raw IRC messages:
+//
+//    // Parse the IRC-encoded data and store the result in a new struct:
+//    message := irc.ParseMessage(raw)
+//
+//    // Translate back to a raw IRC message string:
+//    raw = message.String()
+//
+// Decoder and Encoder can be used to decode and encode messages in a stream:
+//
+//    // Create a decoder that reads from given io.Reader
+//    dec := irc.NewDecoder(reader)
+//
+//    // Decode the next IRC message
+//    message, err := dec.Decode()
+//
+//    // Create an encoder that writes to given io.Writer
+//    enc := irc.NewEncoder(writer)
+//
+//    // Send a message to the writer.
+//    enc.Encode(message)
+//
+// The Conn type combines an Encoder and Decoder for a duplex connection.
+//
+//    c, err := irc.Dial("irc.server.net:6667")
+//
+//    // Methods from both Encoder and Decoder are available
+//    message, err := c.Decode()
+//
+package irc
diff --git a/vendor/github.com/sorcix/irc/message.go b/vendor/github.com/sorcix/irc/message.go
new file mode 100644
index 00000000..088938dc
--- /dev/null
+++ b/vendor/github.com/sorcix/irc/message.go
@@ -0,0 +1,308 @@
+// Copyright 2014 Vic Demuzere
+//
+// Use of this source code is governed by the MIT license.
+
+package irc
+
+import (
+    "bytes"
+    "strings"
+)
+
+// Various constants used for formatting IRC messages.
+const (
+    prefix     byte = 0x3A // Prefix or last argument
+    prefixUser byte = 0x21 // Username
+    prefixHost byte = 0x40 // Hostname
+    space      byte = 0x20 // Separator
+
+    maxLength = 510 // Maximum length is 512 - 2 for the line endings.
+)
+
+func cutsetFunc(r rune) bool {
+    // Characters to trim from prefixes/messages.
+    return r == '\r' || r == '\n'
+}
+
+// Sender represents objects that are able to send messages to an IRC server.
+//
+// As there might be a message queue, it is possible that Send returns a nil
+// error, but the message is not sent (yet). The error value is only used when
+// it is certain that sending the message is impossible.
+//
+// This interface is not used inside this package, and shouldn't have been
+// defined here in the first place. For backwards compatibility only.
+type Sender interface {
+    Send(*Message) error
+}
+
+// Prefix represents the prefix (sender) of an IRC message.
+// See RFC1459 section 2.3.1.
+//
+//    <servername> | <nick> ['!' <user>] ['@' <host>]
+//
+type Prefix struct {
+    Name string // Nick- or servername
+    User string // Username
+    Host string // Hostname
+}
+
+// ParsePrefix takes a string and attempts to create a Prefix struct.
+func ParsePrefix(raw string) (p *Prefix) {
+
+    p = new(Prefix)
+
+    user := indexByte(raw, prefixUser)
+    host := indexByte(raw, prefixHost)
+
+    switch {
+
+    case user > 0 && host > user:
+        p.Name = raw[:user]
+        p.User = raw[user+1 : host]
+        p.Host = raw[host+1:]
+
+    case user > 0:
+        p.Name = raw[:user]
+        p.User = raw[user+1:]
+
+    case host > 0:
+        p.Name = raw[:host]
+        p.Host = raw[host+1:]
+
+    default:
+        p.Name = raw
+
+    }
+
+    return p
+}
+
+// Len calculates the length of the string representation of this prefix.
+func (p *Prefix) Len() (length int) {
+    length = len(p.Name)
+    if len(p.User) > 0 {
+        length = length + len(p.User) + 1
+    }
+    if len(p.Host) > 0 {
+        length = length + len(p.Host) + 1
+    }
+    return
+}
+
+// Bytes returns a []byte representation of this prefix.
+func (p *Prefix) Bytes() []byte {
+    buffer := new(bytes.Buffer)
+    p.writeTo(buffer)
+    return buffer.Bytes()
+}
+
+// String returns a string representation of this prefix.
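+// For example (illustrative), a full hostmask prefix renders as "nick!user@host".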
+func (p *Prefix) String() (s string) {
+    // Benchmarks revealed that in this case simple string concatenation
+    // is actually faster than using a ByteBuffer as in (*Message).String()
+    s = p.Name
+    if len(p.User) > 0 {
+        s = s + string(prefixUser) + p.User
+    }
+    if len(p.Host) > 0 {
+        s = s + string(prefixHost) + p.Host
+    }
+    return
+}
+
+// IsHostmask returns true if this prefix looks like a user hostmask.
+func (p *Prefix) IsHostmask() bool {
+    return len(p.User) > 0 && len(p.Host) > 0
+}
+
+// IsServer returns true if this prefix looks like a server name.
+func (p *Prefix) IsServer() bool {
+    return len(p.User) <= 0 && len(p.Host) <= 0 // && indexByte(p.Name, '.') > 0
+}
+
+// writeTo is a utility function to write the prefix to the bytes.Buffer in Message.String().
+func (p *Prefix) writeTo(buffer *bytes.Buffer) {
+    buffer.WriteString(p.Name)
+    if len(p.User) > 0 {
+        buffer.WriteByte(prefixUser)
+        buffer.WriteString(p.User)
+    }
+    if len(p.Host) > 0 {
+        buffer.WriteByte(prefixHost)
+        buffer.WriteString(p.Host)
+    }
+}
+
+// Message represents an IRC protocol message.
+// See RFC1459 section 2.3.1.
+//
+//    <message>  ::= [':' <prefix> <SPACE>] <command> <params> <crlf>
+//    <prefix>   ::= <servername> | <nick> ['!' <user>] ['@' <host>]
+//    <command>  ::= <letter> {<letter>} | <number> <number> <number>
+//    <SPACE>    ::= ' ' {' '}
+//    <params>   ::= <SPACE> [':' <trailing> | <middle> <params>]
+//
+//    <middle>   ::= <Any *non-empty* sequence of octets not including SPACE or NUL or CR or LF, the first of which may not be ':'>
+//    <trailing> ::= <Any, possibly empty, sequence of octets not including NUL or CR or LF>
+//
+//    <crlf>     ::= CR LF
+type Message struct {
+    *Prefix
+    Command  string
+    Params   []string
+    Trailing string
+
+    // When set to true, the trailing prefix (:) will be added even if the trailing message is empty.
+    EmptyTrailing bool
+}
+
+// ParseMessage takes a string and attempts to create a Message struct.
+// Returns nil if the Message is invalid.
+func ParseMessage(raw string) (m *Message) {
+
+    // Ignore empty messages.
+    if raw = strings.TrimFunc(raw, cutsetFunc); len(raw) < 2 {
+        return nil
+    }
+
+    i, j := 0, 0
+
+    m = new(Message)
+
+    if raw[0] == prefix {
+
+        // Prefix ends with a space.
+        i = indexByte(raw, space)
+
+        // Prefix string must not be empty if the indicator is present.
+        if i < 2 {
+            return nil
+        }
+
+        m.Prefix = ParsePrefix(raw[1:i])
+
+        // Skip space at the end of the prefix
+        i++
+    }
+
+    // Find end of command
+    j = i + indexByte(raw[i:], space)
+
+    // Extract command
+    if j > i {
+        m.Command = strings.ToUpper(raw[i:j])
+    } else {
+        m.Command = strings.ToUpper(raw[i:])
+
+        // We're done here!
+        return m
+    }
+
+    // Skip space after command
+    j++
+
+    // Find prefix for trailer
+    i = indexByte(raw[j:], prefix)
+
+    if i < 0 || raw[j+i-1] != space {
+
+        // There is no trailing argument!
+        m.Params = strings.Split(raw[j:], string(space))
+
+        // We're done here!
+        return m
+    }
+
+    // Compensate for index on substring
+    i = i + j
+
+    // Check if we need to parse arguments.
+    if i > j {
+        m.Params = strings.Split(raw[j:i-1], string(space))
+    }
+
+    m.Trailing = raw[i+1:]
+
+    // We need to re-encode the trailing argument even if it was empty.
+    if len(m.Trailing) <= 0 {
+        m.EmptyTrailing = true
+    }
+
+    return m
+}
+
+// Len calculates the length of the string representation of this message.
+func (m *Message) Len() (length int) {
+
+    if m.Prefix != nil {
+        length = m.Prefix.Len() + 2 // Include prefix and trailing space
+    }
+
+    length = length + len(m.Command)
+
+    if len(m.Params) > 0 {
+        length = length + len(m.Params)
+        for _, param := range m.Params {
+            length = length + len(param)
+        }
+    }
+
+    if len(m.Trailing) > 0 || m.EmptyTrailing {
+        length = length + len(m.Trailing) + 2 // Include prefix and space
+    }
+
+    return
+}
+
+// Bytes returns a []byte representation of this message.
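+// For example (illustrative), a fully populated message renders as:
+//
+//    :nick!user@host PRIVMSG #channel :Hello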
+//
+// As noted in rfc2812 section 2.3, messages should not exceed 512 characters
+// in length. This method forces that limit by discarding any characters
+// exceeding the length limit.
+func (m *Message) Bytes() []byte {
+
+    buffer := new(bytes.Buffer)
+
+    // Message prefix
+    if m.Prefix != nil {
+        buffer.WriteByte(prefix)
+        m.Prefix.writeTo(buffer)
+        buffer.WriteByte(space)
+    }
+
+    // Command is required
+    buffer.WriteString(m.Command)
+
+    // Space separated list of arguments
+    if len(m.Params) > 0 {
+        buffer.WriteByte(space)
+        buffer.WriteString(strings.Join(m.Params, string(space)))
+    }
+
+    if len(m.Trailing) > 0 || m.EmptyTrailing {
+        buffer.WriteByte(space)
+        buffer.WriteByte(prefix)
+        buffer.WriteString(m.Trailing)
+    }
+
+    // We need to limit the buffer length.
+    if buffer.Len() > (maxLength) {
+        buffer.Truncate(maxLength)
+    }
+
+    return buffer.Bytes()
+}
+
+// String returns a string representation of this message.
+//
+// As noted in rfc2812 section 2.3, messages should not exceed 512 characters
+// in length. This method forces that limit by discarding any characters
+// exceeding the length limit.
+func (m *Message) String() string {
+    return string(m.Bytes())
+}
diff --git a/vendor/github.com/sorcix/irc/stream.go b/vendor/github.com/sorcix/irc/stream.go
new file mode 100644
index 00000000..c4af9af1
--- /dev/null
+++ b/vendor/github.com/sorcix/irc/stream.go
@@ -0,0 +1,134 @@
+// Copyright 2014 Vic Demuzere
+//
+// Use of this source code is governed by the MIT license.
+
+package irc
+
+import (
+    "bufio"
+    "io"
+    "net"
+    "sync"
+)
+
+// Messages are delimited with CR and LF line endings,
+// we're using the last one to split the stream. Both are removed
+// during message parsing.
+const delim byte = '\n'
+
+var endline = []byte("\r\n")
+
+// A Conn represents an IRC network protocol connection.
+// It consists of an Encoder and Decoder to manage I/O.
+type Conn struct {
+    Encoder
+    Decoder
+
+    conn io.ReadWriteCloser
+}
+
+// NewConn returns a new Conn using rwc for I/O.
+func NewConn(rwc io.ReadWriteCloser) *Conn {
+    return &Conn{
+        Encoder: Encoder{
+            writer: rwc,
+        },
+        Decoder: Decoder{
+            reader: bufio.NewReader(rwc),
+        },
+        conn: rwc,
+    }
+}
+
+// Dial connects to the given address using net.Dial and
+// then returns a new Conn for the connection.
+func Dial(addr string) (*Conn, error) {
+    c, err := net.Dial("tcp", addr)
+
+    if err != nil {
+        return nil, err
+    }
+
+    return NewConn(c), nil
+}
+
+// Close closes the underlying ReadWriteCloser.
+func (c *Conn) Close() error {
+    return c.conn.Close()
+}
+
+// A Decoder reads Message objects from an input stream.
+type Decoder struct {
+    reader *bufio.Reader
+    line   string
+    mu     sync.Mutex
+}
+
+// NewDecoder returns a new Decoder that reads from r.
+func NewDecoder(r io.Reader) *Decoder {
+    return &Decoder{
+        reader: bufio.NewReader(r),
+    }
+}
+
+// Decode attempts to read a single Message from the stream.
+//
+// Returns a non-nil error if the read failed.
+func (dec *Decoder) Decode() (m *Message, err error) {
+
+    dec.mu.Lock()
+    dec.line, err = dec.reader.ReadString(delim)
+    dec.mu.Unlock()
+
+    if err != nil {
+        return nil, err
+    }
+
+    return ParseMessage(dec.line), nil
+}
+
+// An Encoder writes Message objects to an output stream.
+type Encoder struct {
+    writer io.Writer
+    mu     sync.Mutex
+}
+
+// NewEncoder returns a new Encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+    return &Encoder{
+        writer: w,
+    }
+}
+
+// Encode writes the IRC encoding of m to the stream.
+//
+// This method may be used from multiple goroutines.
+//
+// Returns a non-nil error if the write to the underlying stream stopped early.
+func (enc *Encoder) Encode(m *Message) (err error) {
+
+    _, err = enc.Write(m.Bytes())
+
+    return
+}
+
+// Write writes len(p) bytes from p followed by CR+LF.
+//
+// This method can be used simultaneously from multiple goroutines;
+// it guarantees serialized access. However, writing a single IRC message
+// using multiple Write calls will cause corruption.
+func (enc *Encoder) Write(p []byte) (n int, err error) {
+
+    enc.mu.Lock()
+    n, err = enc.writer.Write(p)
+
+    if err != nil {
+        enc.mu.Unlock()
+        return
+    }
+
+    _, err = enc.writer.Write(endline)
+    enc.mu.Unlock()
+
+    return
+}
diff --git a/vendor/github.com/sorcix/irc/strings.go b/vendor/github.com/sorcix/irc/strings.go
new file mode 100644
index 00000000..550739f4
--- /dev/null
+++ b/vendor/github.com/sorcix/irc/strings.go
@@ -0,0 +1,17 @@
+// Copyright 2014 Vic Demuzere
+//
+// Use of this source code is governed by the MIT license.
+
+// +build go1.2
+
+// Documented in strings_legacy.go
+
+package irc
+
+import (
+    "strings"
+)
+
+func indexByte(s string, c byte) int {
+    return strings.IndexByte(s, c)
+}
diff --git a/vendor/github.com/sorcix/irc/strings_legacy.go b/vendor/github.com/sorcix/irc/strings_legacy.go
new file mode 100644
index 00000000..f9328ec7
--- /dev/null
+++ b/vendor/github.com/sorcix/irc/strings_legacy.go
@@ -0,0 +1,22 @@
+// Copyright 2014 Vic Demuzere
+//
+// Use of this source code is governed by the MIT license.
+
+// +build !go1.2
+
+// Debian Wheezy only ships Go 1.0:
+// https://github.com/sorcix/irc/issues/4
+//
+// This code may be removed when Wheezy is no longer supported.
+
+package irc
+
+// indexByte implements strings.IndexByte for Go versions < 1.2.
+func indexByte(s string, c byte) int {
+    for i := range s {
+        if s[i] == c {
+            return i
+        }
+    }
+    return -1
+}
diff --git a/vendor/github.com/thoj/go-ircevent/LICENSE b/vendor/github.com/thoj/go-ircevent/LICENSE
new file mode 100644
index 00000000..d6bf3577
--- /dev/null
+++ b/vendor/github.com/thoj/go-ircevent/LICENSE
@@ -0,0 +1,27 @@
+// Copyright (c) 2009 Thomas Jager. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/thoj/go-ircevent/irc.go b/vendor/github.com/thoj/go-ircevent/irc.go
new file mode 100644
index 00000000..9043e888
--- /dev/null
+++ b/vendor/github.com/thoj/go-ircevent/irc.go
@@ -0,0 +1,473 @@
+// Copyright 2009 Thomas Jager All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+This package provides an event-based IRC client library. It lets you
+register callbacks for the events you need to handle. Its features
+include handling standard CTCP, reconnecting on errors and detecting
+unresponsive servers.
+Details of the IRC protocol can be found in the following RFCs:
+https://tools.ietf.org/html/rfc1459
+https://tools.ietf.org/html/rfc2810
+https://tools.ietf.org/html/rfc2811
+https://tools.ietf.org/html/rfc2812
+https://tools.ietf.org/html/rfc2813
+The details of the client-to-client protocol (CTCP) can be found here: http://www.irchelp.org/irchelp/rfc/ctcpspec.html
+*/
+
+package irc
+
+import (
+    "bufio"
+    "bytes"
+    "crypto/tls"
+    "errors"
+    "fmt"
+    "log"
+    "net"
+    "os"
+    "strconv"
+    "strings"
+    "time"
+)
+
+const (
+    VERSION = "go-ircevent v2.1"
+)
+
+var ErrDisconnected = errors.New("Disconnect Called")
+
+// Read data from a connection. To be used as a goroutine.
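+// Each received line is stamped on lastMessage, parsed with parseToEvent,
+// and dispatched through RunCallbacks; read errors go to the error channel.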
+func (irc *Connection) readLoop() {
+    defer irc.Done()
+    br := bufio.NewReaderSize(irc.socket, 512)
+
+    errChan := irc.ErrorChan()
+
+    for {
+        select {
+        case <-irc.end:
+            return
+        default:
+            // Set a read deadline based on the combined timeout and ping frequency
+            // We should ALWAYS have received a response from the server within the timeout
+            // after our own pings
+            if irc.socket != nil {
+                irc.socket.SetReadDeadline(time.Now().Add(irc.Timeout + irc.PingFreq))
+            }
+
+            msg, err := br.ReadString('\n')
+
+            // We got past our blocking read, so clear the read deadline
+            if irc.socket != nil {
+                var zero time.Time
+                irc.socket.SetReadDeadline(zero)
+            }
+
+            if err != nil {
+                errChan <- err
+                return
+            }
+
+            if irc.Debug {
+                irc.Log.Printf("<-- %s\n", strings.TrimSpace(msg))
+            }
+
+            irc.lastMessage = time.Now()
+            event, err := parseToEvent(msg)
+            if err == nil {
+                // Only touch the event when parsing succeeded;
+                // parseToEvent returns a nil event on error.
+                event.Connection = irc
+                /* XXX: len(args) == 0: args should be empty */
+                irc.RunCallbacks(event)
+            }
+        }
+    }
+}
+
+// parseToEvent parses a raw IRC message into an Event.
+func parseToEvent(msg string) (*Event, error) {
+    msg = strings.TrimSuffix(msg, "\n") // Remove \r\n
+    msg = strings.TrimSuffix(msg, "\r")
+    event := &Event{Raw: msg}
+    if len(msg) < 5 {
+        return nil, errors.New("malformed msg from server")
+    }
+    if msg[0] == ':' {
+        if i := strings.Index(msg, " "); i > -1 {
+            event.Source = msg[1:i]
+            msg = msg[i+1:]
+
+        } else {
+            return nil, errors.New("malformed msg from server")
+        }
+
+        if i, j := strings.Index(event.Source, "!"), strings.Index(event.Source, "@"); i > -1 && j > -1 && i < j {
+            event.Nick = event.Source[0:i]
+            event.User = event.Source[i+1 : j]
+            event.Host = event.Source[j+1:]
+        }
+    }
+
+    split := strings.SplitN(msg, " :", 2)
+    args := strings.Split(split[0], " ")
+    event.Code = strings.ToUpper(args[0])
+    event.Arguments = args[1:]
+    if len(split) > 1 {
+        event.Arguments = append(event.Arguments, split[1])
+    }
+    return event, nil
+}
+
+// Loop to write to a connection. To be used as a goroutine.
+func (irc *Connection) writeLoop() {
+    defer irc.Done()
+    errChan := irc.ErrorChan()
+    for {
+        select {
+        case <-irc.end:
+            return
+        case b, ok := <-irc.pwrite:
+            if !ok || b == "" || irc.socket == nil {
+                return
+            }
+
+            if irc.Debug {
+                irc.Log.Printf("--> %s\n", strings.TrimSpace(b))
+            }
+
+            // Set a write deadline based on the timeout
+            irc.socket.SetWriteDeadline(time.Now().Add(irc.Timeout))
+
+            _, err := irc.socket.Write([]byte(b))
+
+            // Past the blocking write, so clear the write deadline
+            var zero time.Time
+            irc.socket.SetWriteDeadline(zero)
+
+            if err != nil {
+                errChan <- err
+                return
+            }
+        }
+    }
+}
+
+// Pings the server if we have not received any messages within the keep
+// alive period, to keep the connection alive. To be used as a goroutine.
+func (irc *Connection) pingLoop() {
+    defer irc.Done()
+    ticker := time.NewTicker(1 * time.Minute) // Tick every minute for monitoring
+    ticker2 := time.NewTicker(irc.PingFreq)   // Tick at the ping frequency.
+    for {
+        select {
+        case <-ticker.C:
+            // Ping if we haven't received anything from the server within the keep alive period
+            if time.Since(irc.lastMessage) >= irc.KeepAlive {
+                irc.SendRawf("PING %d", time.Now().UnixNano())
+            }
+        case <-ticker2.C:
+            // Ping at the ping frequency
+            irc.SendRawf("PING %d", time.Now().UnixNano())
+            // Try to recapture the nickname if it is not as configured.
+            if irc.nick != irc.nickcurrent {
+                irc.nickcurrent = irc.nick
+                irc.SendRawf("NICK %s", irc.nick)
+            }
+        case <-irc.end:
+            ticker.Stop()
+            ticker2.Stop()
+            return
+        }
+    }
+}
+
+// Main loop to control the connection.
+func (irc *Connection) Loop() {
+    errChan := irc.ErrorChan()
+    for !irc.quit {
+        err := <-errChan
+        irc.Log.Printf("Error, disconnected: %s\n", err)
+        for !irc.quit {
+            if err = irc.Reconnect(); err != nil {
+                irc.Log.Printf("Error while reconnecting: %s\n", err)
+                time.Sleep(60 * time.Second)
+            } else {
+                errChan = irc.ErrorChan()
+                break
+            }
+        }
+    }
+}
+
+// Quit the current connection and disconnect from the server.
+// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.1.6
+func (irc *Connection) Quit() {
+    quit := "QUIT"
+
+    if irc.QuitMessage != "" {
+        quit = fmt.Sprintf("QUIT :%s", irc.QuitMessage)
+    }
+
+    irc.SendRaw(quit)
+    irc.stopped = true
+    irc.quit = true
+}
+
+// Use the connection to join a given channel.
+// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.2.1
+func (irc *Connection) Join(channel string) {
+    irc.pwrite <- fmt.Sprintf("JOIN %s\r\n", channel)
+}
+
+// Leave a given channel.
+// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.2.2
+func (irc *Connection) Part(channel string) {
+    irc.pwrite <- fmt.Sprintf("PART %s\r\n", channel)
+}
+
+// Send a notification to a nickname. This is similar to Privmsg, but
+// automatic replies must never be sent in response to a NOTICE.
+// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.4.2
+func (irc *Connection) Notice(target, message string) {
+    irc.pwrite <- fmt.Sprintf("NOTICE %s :%s\r\n", target, message)
+}
+
+// Send a formatted notification to a nickname.
+// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.4.2
+func (irc *Connection) Noticef(target, format string, a ...interface{}) {
+    irc.Notice(target, fmt.Sprintf(format, a...))
+}
+
+// Send an (action) message to a target (channel or nickname).
+// No clear RFC on this one...
+func (irc *Connection) Action(target, message string) {
+    irc.pwrite <- fmt.Sprintf("PRIVMSG %s :\001ACTION %s\001\r\n", target, message)
+}
+
+// Send a formatted (action) message to a target (channel or nickname).
+func (irc *Connection) Actionf(target, format string, a ...interface{}) {
+    irc.Action(target, fmt.Sprintf(format, a...))
+}
+
+// Send a (private) message to a target (channel or nickname).
+// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.4.1
+func (irc *Connection) Privmsg(target, message string) {
+    irc.pwrite <- fmt.Sprintf("PRIVMSG %s :%s\r\n", target, message)
+}
+
+// Send a formatted string to the specified target (channel or nickname).
+func (irc *Connection) Privmsgf(target, format string, a ...interface{}) {
+    irc.Privmsg(target, fmt.Sprintf(format, a...))
+}
+
+// Kick <user> from <channel> with <msg>. For no message, pass an empty string ("").
+func (irc *Connection) Kick(user, channel, msg string) {
+    var cmd bytes.Buffer
+    cmd.WriteString(fmt.Sprintf("KICK %s %s", channel, user))
+    if msg != "" {
+        cmd.WriteString(fmt.Sprintf(" :%s", msg))
+    }
+    cmd.WriteString("\r\n")
+    irc.pwrite <- cmd.String()
+}
+
+// Kick all <users> from <channel> with <msg>. For no message, pass an
+// empty string ("").
+func (irc *Connection) MultiKick(users []string, channel string, msg string) {
+    var cmd bytes.Buffer
+    cmd.WriteString(fmt.Sprintf("KICK %s %s", channel, strings.Join(users, ",")))
+    if msg != "" {
+        cmd.WriteString(fmt.Sprintf(" :%s", msg))
+    }
+    cmd.WriteString("\r\n")
+    irc.pwrite <- cmd.String()
+}
+
+// Send a raw string to the server.
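+// The CR LF terminator is appended here before the message is queued on the
+// write loop.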
+func (irc *Connection) SendRaw(message string) {
+    irc.pwrite <- message + "\r\n"
+}
+
+// Send a raw formatted string to the server.
+func (irc *Connection) SendRawf(format string, a ...interface{}) {
+    irc.SendRaw(fmt.Sprintf(format, a...))
+}
+
+// Set a (new) nickname.
+// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.1.2
+func (irc *Connection) Nick(n string) {
+    irc.nick = n
+    irc.SendRawf("NICK %s", n)
+}
+
+// Determine the nick currently used with the connection.
+func (irc *Connection) GetNick() string {
+    return irc.nickcurrent
+}
+
+// Query information about a particular nickname.
+// RFC 1459: https://tools.ietf.org/html/rfc1459#section-4.5.2
+func (irc *Connection) Whois(nick string) {
+    irc.SendRawf("WHOIS %s", nick)
+}
+
+// Query information about a given nickname on the server.
+// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.5.1
+func (irc *Connection) Who(nick string) {
+    irc.SendRawf("WHO %s", nick)
+}
+
+// Set different modes for a target (channel or nickname).
+// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.2.3
+func (irc *Connection) Mode(target string, modestring ...string) {
+    if len(modestring) > 0 {
+        mode := strings.Join(modestring, " ")
+        irc.SendRawf("MODE %s %s", target, mode)
+        return
+    }
+    irc.SendRawf("MODE %s", target)
+}
+
+func (irc *Connection) ErrorChan() chan error {
+    return irc.Error
+}
+
+// Returns true if the connection is connected to an IRC server.
+func (irc *Connection) Connected() bool {
+    return !irc.stopped
+}
+
+// A disconnect sends all buffered messages (if possible),
+// stops all goroutines and then closes the socket.
+func (irc *Connection) Disconnect() {
+    for event := range irc.events {
+        irc.ClearCallback(event)
+    }
+    if irc.end != nil {
+        close(irc.end)
+    }
+
+    irc.end = nil
+
+    if irc.pwrite != nil {
+        close(irc.pwrite)
+    }
+
+    irc.Wait()
+    if irc.socket != nil {
+        irc.socket.Close()
+    }
+    irc.socket = nil
+    irc.ErrorChan() <- ErrDisconnected
+}
+
+// Reconnect to a server using the current connection.
+func (irc *Connection) Reconnect() error {
+    if irc.end != nil {
+        close(irc.end)
+    }
+
+    irc.end = nil
+
+    irc.Wait() // make sure that the wait group is cleared, ensuring that all spawned goroutines have completed
+
+    irc.end = make(chan struct{})
+    return irc.Connect(irc.Server)
+}
+
+// Connect to a given server using the current connection configuration.
+// This function also takes care of identification if a password is provided.
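+// The server address must have the form "host:port" (validated below),
+// e.g. "irc.example.net:6667" (illustrative host).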
+// RFC 1459 details: https://tools.ietf.org/html/rfc1459#section-4.1 +func (irc *Connection) Connect(server string) error { + irc.Server = server + // mark Server as stopped since there can be an error during connect + irc.stopped = true + + // make sure everything is ready for connection + if len(irc.Server) == 0 { + return errors.New("empty 'server'") + } + if strings.Count(irc.Server, ":") != 1 { + return errors.New("wrong number of ':' in address") + } + if strings.Index(irc.Server, ":") == 0 { + return errors.New("hostname is missing") + } + if strings.Index(irc.Server, ":") == len(irc.Server)-1 { + return errors.New("port missing") + } + // check for valid range + ports := strings.Split(irc.Server, ":")[1] + port, err := strconv.Atoi(ports) + if err != nil { + return errors.New("extracting port failed") + } + if !((port >= 0) && (port <= 65535)) { + return errors.New("port number outside valid range") + } + if irc.Log == nil { + return errors.New("'Log' points to nil") + } + if len(irc.nick) == 0 { + return errors.New("empty 'nick'") + } + if len(irc.user) == 0 { + return errors.New("empty 'user'") + } + + if irc.UseTLS { + dialer := &net.Dialer{Timeout: irc.Timeout} + irc.socket, err = tls.DialWithDialer(dialer, "tcp", irc.Server, irc.TLSConfig) + } else { + irc.socket, err = net.DialTimeout("tcp", irc.Server, irc.Timeout) + } + if err != nil { + return err + } + + irc.stopped = false + irc.Log.Printf("Connected to %s (%s)\n", irc.Server, irc.socket.RemoteAddr()) + + irc.pwrite = make(chan string, 10) + irc.Error = make(chan error, 2) + irc.Add(3) + go irc.readLoop() + go irc.writeLoop() + go irc.pingLoop() + if len(irc.Password) > 0 { + irc.pwrite <- fmt.Sprintf("PASS %s\r\n", irc.Password) + } + irc.pwrite <- fmt.Sprintf("NICK %s\r\n", irc.nick) + irc.pwrite <- fmt.Sprintf("USER %s 0.0.0.0 0.0.0.0 :%s\r\n", irc.user, irc.user) + return nil +} + +// Create a connection with the (publicly visible) nickname and username. +// The nickname is later used to address the user. Returns nil if nick +// or user are empty. +func IRC(nick, user string) *Connection { + // catch invalid values + if len(nick) == 0 { + return nil + } + if len(user) == 0 { + return nil + } + + irc := &Connection{ + nick: nick, + nickcurrent: nick, + user: user, + Log: log.New(os.Stdout, "", log.LstdFlags), + end: make(chan struct{}), + Version: VERSION, + KeepAlive: 4 * time.Minute, + Timeout: 1 * time.Minute, + PingFreq: 15 * time.Minute, + QuitMessage: "", + } + irc.setupCallbacks() + return irc +} diff --git a/vendor/github.com/thoj/go-ircevent/irc_callback.go b/vendor/github.com/thoj/go-ircevent/irc_callback.go new file mode 100644 index 00000000..109cbb1f --- /dev/null +++ b/vendor/github.com/thoj/go-ircevent/irc_callback.go @@ -0,0 +1,221 @@ +package irc + +import ( + "strconv" + "strings" + "time" +) + +// Register a callback to a connection and event code. A callback is a function +// which takes only an Event pointer as parameter. Valid event codes are all +// IRC/CTCP commands and error/response codes. This function returns the ID of +// the registered callback for later management. +func (irc *Connection) AddCallback(eventcode string, callback func(*Event)) int { + eventcode = strings.ToUpper(eventcode) + id := 0 + if _, ok := irc.events[eventcode]; !ok { + irc.events[eventcode] = make(map[int]func(*Event)) + id = 0 + } else { + id = len(irc.events[eventcode]) + } + irc.events[eventcode][id] = callback + return id +} + +// Remove callback i (ID) from the given event code. 
This function returns
+// true upon success, false if any error occurs.
+func (irc *Connection) RemoveCallback(eventcode string, i int) bool {
+    eventcode = strings.ToUpper(eventcode)
+
+    if event, ok := irc.events[eventcode]; ok {
+        if _, ok := event[i]; ok {
+            delete(irc.events[eventcode], i)
+            return true
+        }
+        irc.Log.Printf("Event found, but no callback found at id %d\n", i)
+        return false
+    }
+
+    irc.Log.Println("Event not found")
+    return false
+}
+
+// Remove all callbacks from a given event code. It returns true
+// if the given event code is found and cleared.
+func (irc *Connection) ClearCallback(eventcode string) bool {
+    eventcode = strings.ToUpper(eventcode)
+
+    if _, ok := irc.events[eventcode]; ok {
+        irc.events[eventcode] = make(map[int]func(*Event))
+        return true
+    }
+
+    irc.Log.Println("Event not found")
+    return false
+}
+
+// Replace callback i (ID) associated with a given event code with a new callback function.
+func (irc *Connection) ReplaceCallback(eventcode string, i int, callback func(*Event)) {
+    eventcode = strings.ToUpper(eventcode)
+
+    if event, ok := irc.events[eventcode]; ok {
+        if _, ok := event[i]; ok {
+            event[i] = callback
+            return
+        }
+        irc.Log.Printf("Event found, but no callback found at id %d\n", i)
+    }
+    irc.Log.Printf("Event not found. Use AddCallback\n")
+}
+
+// Execute all callbacks associated with a given event.
+func (irc *Connection) RunCallbacks(event *Event) {
+    msg := event.Message()
+    if event.Code == "PRIVMSG" && len(msg) > 2 && msg[0] == '\x01' {
+        event.Code = "CTCP" // Unknown CTCP
+
+        if i := strings.LastIndex(msg, "\x01"); i > 0 {
+            msg = msg[1:i]
+        } else {
+            irc.Log.Printf("Invalid CTCP Message: %s\n", strconv.Quote(msg))
+            return
+        }
+
+        if msg == "VERSION" {
+            event.Code = "CTCP_VERSION"
+
+        } else if msg == "TIME" {
+            event.Code = "CTCP_TIME"
+
+        } else if strings.HasPrefix(msg, "PING") {
+            event.Code = "CTCP_PING"
+
+        } else if msg == "USERINFO" {
+            event.Code = "CTCP_USERINFO"
+
+        } else if msg == "CLIENTINFO" {
+            event.Code = "CTCP_CLIENTINFO"
+
+        } else if strings.HasPrefix(msg, "ACTION") {
+            event.Code = "CTCP_ACTION"
+            if len(msg) > 6 {
+                msg = msg[7:]
+            } else {
+                msg = ""
+            }
+        }
+
+        event.Arguments[len(event.Arguments)-1] = msg
+    }
+
+    if callbacks, ok := irc.events[event.Code]; ok {
+        if irc.VerboseCallbackHandler {
+            irc.Log.Printf("%v (%v) >> %#v\n", event.Code, len(callbacks), event)
+        }
+
+        for _, callback := range callbacks {
+            callback(event)
+        }
+    } else if irc.VerboseCallbackHandler {
+        irc.Log.Printf("%v (0) >> %#v\n", event.Code, event)
+    }
+
+    if callbacks, ok := irc.events["*"]; ok {
+        if irc.VerboseCallbackHandler {
+            irc.Log.Printf("%v (0) >> %#v\n", event.Code, event)
+        }
+
+        for _, callback := range callbacks {
+            callback(event)
+        }
+    }
+}
+
+// Set up some initial callbacks to handle the IRC/CTCP protocol.
+func (irc *Connection) setupCallbacks() {
+    irc.events = make(map[string]map[int]func(*Event))
+
+    // Handle error events. This has to be called in a new goroutine
+    // to allow readLoop to exit.
+    irc.AddCallback("ERROR", func(e *Event) { go irc.Disconnect() })
+
+    // Handle ping events
+    irc.AddCallback("PING", func(e *Event) { irc.SendRaw("PONG :" + e.Message()) })
+
+    // Version handler
+    irc.AddCallback("CTCP_VERSION", func(e *Event) {
+        irc.SendRawf("NOTICE %s :\x01VERSION %s\x01", e.Nick, irc.Version)
+    })
+
+    irc.AddCallback("CTCP_USERINFO", func(e *Event) {
+        irc.SendRawf("NOTICE %s :\x01USERINFO %s\x01", e.Nick, irc.user)
+    })
+
+    irc.AddCallback("CTCP_CLIENTINFO", func(e *Event) {
+        irc.SendRawf("NOTICE %s :\x01CLIENTINFO PING VERSION TIME USERINFO CLIENTINFO\x01", e.Nick)
+    })
+
+    irc.AddCallback("CTCP_TIME", func(e *Event) {
+        ltime := time.Now()
+        irc.SendRawf("NOTICE %s :\x01TIME %s\x01", e.Nick, ltime.String())
+    })
+
+    irc.AddCallback("CTCP_PING", func(e *Event) { irc.SendRawf("NOTICE %s :\x01%s\x01", e.Nick, e.Message()) })
+
+    // 437: ERR_UNAVAILRESOURCE "<nick/channel> :Nick/channel is temporarily unavailable"
+    // Add a _ to the current nick. If irc.nickcurrent is empty this cannot
+    // work. It has to be set somewhere first in case the nick is already
+    // taken or unavailable from the beginning.
+    irc.AddCallback("437", func(e *Event) {
+        // If irc.nickcurrent hasn't been set yet, set it to irc.nick
+        if irc.nickcurrent == "" {
+            irc.nickcurrent = irc.nick
+        }
+
+        if len(irc.nickcurrent) > 8 {
+            irc.nickcurrent = "_" + irc.nickcurrent
+        } else {
+            irc.nickcurrent = irc.nickcurrent + "_"
+        }
+        irc.SendRawf("NICK %s", irc.nickcurrent)
+    })
+
+    // 433: ERR_NICKNAMEINUSE "<nick> :Nickname is already in use"
+    // Add a _ to the current nick.
+    irc.AddCallback("433", func(e *Event) {
+        // If irc.nickcurrent hasn't been set yet, set it to irc.nick
+        if irc.nickcurrent == "" {
+            irc.nickcurrent = irc.nick
+        }
+
+        if len(irc.nickcurrent) > 8 {
+            irc.nickcurrent = "_" + irc.nickcurrent
+        } else {
+            irc.nickcurrent = irc.nickcurrent + "_"
+        }
+        irc.SendRawf("NICK %s", irc.nickcurrent)
+    })
+
+    irc.AddCallback("PONG", func(e *Event) {
+        ns, _ := strconv.ParseInt(e.Message(), 10, 64)
+        delta := time.Duration(time.Now().UnixNano() - ns)
+        if irc.Debug {
+            irc.Log.Printf("Lag: %v\n", delta)
+        }
+    })
+
+    // NICK Define a nickname.
+    // Set irc.nickcurrent to the new nick actually used in this connection.
+    irc.AddCallback("NICK", func(e *Event) {
+        if e.Nick == irc.nick {
+            irc.nickcurrent = e.Message()
+        }
+    })
+
+    // 001: RPL_WELCOME "Welcome to the Internet Relay Network <nick>!<user>@<host>"
+    // Set irc.nickcurrent to the nick actually used in this connection.
+    irc.AddCallback("001", func(e *Event) {
+        irc.nickcurrent = e.Arguments[0]
+    })
+}
diff --git a/vendor/github.com/thoj/go-ircevent/irc_struct.go b/vendor/github.com/thoj/go-ircevent/irc_struct.go
new file mode 100644
index 00000000..3e4a438f
--- /dev/null
+++ b/vendor/github.com/thoj/go-ircevent/irc_struct.go
@@ -0,0 +1,68 @@
+// Copyright 2009 Thomas Jager All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package irc
+
+import (
+    "crypto/tls"
+    "log"
+    "net"
+    "sync"
+    "time"
+)
+
+type Connection struct {
+    sync.WaitGroup
+    Debug     bool
+    Error     chan error
+    Password  string
+    UseTLS    bool
+    TLSConfig *tls.Config
+    Version   string
+    Timeout   time.Duration
+    PingFreq  time.Duration
+    KeepAlive time.Duration
+    Server    string
+
+    socket net.Conn
+    pwrite chan string
+    end    chan struct{}
+
+    nick        string // The nickname we want.
+    nickcurrent string // The nickname we currently have.
+	user        string
+	registered  bool
+	events      map[string]map[int]func(*Event)
+
+	QuitMessage string
+	lastMessage time.Time
+
+	VerboseCallbackHandler bool
+	Log                    *log.Logger
+
+	stopped bool
+	quit    bool
+}
+
+// A struct to represent an event.
+type Event struct {
+	Code       string
+	Raw        string
+	Nick       string //<nick>
+	Host       string //<nick>!<user>@<host>
+	Source     string //<host>
+	User       string //<user>
+	Arguments  []string
+	Connection *Connection
+}
+
+// Retrieve the last message from Event arguments.
+// This function leaves the arguments untouched and
+// returns an empty string if there are none.
+func (e *Event) Message() string {
+	if len(e.Arguments) == 0 {
+		return ""
+	}
+	return e.Arguments[len(e.Arguments)-1]
+}
diff --git a/vendor/github.com/thoj/go-ircevent/irc_test_fuzz.go b/vendor/github.com/thoj/go-ircevent/irc_test_fuzz.go
new file mode 100644
index 00000000..82202e1c
--- /dev/null
+++ b/vendor/github.com/thoj/go-ircevent/irc_test_fuzz.go
@@ -0,0 +1,14 @@
+// +build gofuzz
+
+package irc
+
+import "bytes"
+
+func Fuzz(data []byte) int {
+	b := bytes.NewBuffer(data)
+	event, err := parseToEvent(b.String())
+	if err == nil {
+		irc := IRC("go-eventirc", "go-eventirc")
+		irc.RunCallbacks(event)
+		return 1
+	}
+	return 0
+}
diff --git a/vendor/golang.org/x/crypto/bcrypt/LICENSE b/vendor/golang.org/x/crypto/bcrypt/LICENSE
new file mode 100644
index 00000000..6a66aea5
--- /dev/null
+++ b/vendor/golang.org/x/crypto/bcrypt/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go
new file mode 100644
index 00000000..fc311609
--- /dev/null
+++ b/vendor/golang.org/x/crypto/bcrypt/base64.go
@@ -0,0 +1,35 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package bcrypt + +import "encoding/base64" + +const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + +var bcEncoding = base64.NewEncoding(alphabet) + +func base64Encode(src []byte) []byte { + n := bcEncoding.EncodedLen(len(src)) + dst := make([]byte, n) + bcEncoding.Encode(dst, src) + for dst[n-1] == '=' { + n-- + } + return dst[:n] +} + +func base64Decode(src []byte) ([]byte, error) { + numOfEquals := 4 - (len(src) % 4) + for i := 0; i < numOfEquals; i++ { + src = append(src, '=') + } + + dst := make([]byte, bcEncoding.DecodedLen(len(src))) + n, err := bcEncoding.Decode(dst, src) + if err != nil { + return nil, err + } + return dst[:n], nil +} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go new file mode 100644 index 00000000..f8b807f9 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -0,0 +1,294 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing +// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf +package bcrypt // import "golang.org/x/crypto/bcrypt" + +// The code is a port of Provos and Mazières's C implementation. +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "golang.org/x/crypto/blowfish" + "io" + "strconv" +) + +const ( + MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword + MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword + DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword +) + +// The error returned from CompareHashAndPassword when a password and hash do +// not match. +var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") + +// The error returned from CompareHashAndPassword when a hash is too short to +// be a bcrypt hash. +var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") + +// The error returned from CompareHashAndPassword when a hash was created with +// a bcrypt algorithm newer than this implementation. +type HashVersionTooNewError byte + +func (hv HashVersionTooNewError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) +} + +// The error returned from CompareHashAndPassword when a hash starts with something other than '$' +type InvalidHashPrefixError byte + +func (ih InvalidHashPrefixError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) +} + +type InvalidCostError int + +func (ic InvalidCostError) Error() string { + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) +} + +const ( + majorVersion = '2' + minorVersion = 'a' + maxSaltSize = 16 + maxCryptedHashSize = 23 + encodedSaltSize = 22 + encodedHashSize = 31 + minHashSize = 59 +) + +// magicCipherData is an IV for the 64 Blowfish encryption calls in +// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. 
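+// (Data-flow note, inferred from bcrypt() below rather than stated in the
+// original source: each 8-byte block of this IV is encrypted 64 times with
+// the password- and salt-derived key schedule, and the first 23 bytes of the
+// 24-byte result are base64-encoded to form the hash part of the final
+// string.)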
+var magicCipherData = []byte{ + 0x4f, 0x72, 0x70, 0x68, + 0x65, 0x61, 0x6e, 0x42, + 0x65, 0x68, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x53, + 0x63, 0x72, 0x79, 0x44, + 0x6f, 0x75, 0x62, 0x74, +} + +type hashed struct { + hash []byte + salt []byte + cost int // allowed range is MinCost to MaxCost + major byte + minor byte +} + +// GenerateFromPassword returns the bcrypt hash of the password at the given +// cost. If the cost given is less than MinCost, the cost will be set to +// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, +// to compare the returned hashed password with its cleartext version. +func GenerateFromPassword(password []byte, cost int) ([]byte, error) { + p, err := newFromPassword(password, cost) + if err != nil { + return nil, err + } + return p.Hash(), nil +} + +// CompareHashAndPassword compares a bcrypt hashed password with its possible +// plaintext equivalent. Returns nil on success, or an error on failure. +func CompareHashAndPassword(hashedPassword, password []byte) error { + p, err := newFromHash(hashedPassword) + if err != nil { + return err + } + + otherHash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return err + } + + otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} + if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { + return nil + } + + return ErrMismatchedHashAndPassword +} + +// Cost returns the hashing cost used to create the given hashed +// password. When, in the future, the hashing cost of a password system needs +// to be increased in order to adjust for greater computational power, this +// function allows one to establish which passwords need to be updated. +func Cost(hashedPassword []byte) (int, error) { + p, err := newFromHash(hashedPassword) + if err != nil { + return 0, err + } + return p.cost, nil +} + +func newFromPassword(password []byte, cost int) (*hashed, error) { + if cost < MinCost { + cost = DefaultCost + } + p := new(hashed) + p.major = majorVersion + p.minor = minorVersion + + err := checkCost(cost) + if err != nil { + return nil, err + } + p.cost = cost + + unencodedSalt := make([]byte, maxSaltSize) + _, err = io.ReadFull(rand.Reader, unencodedSalt) + if err != nil { + return nil, err + } + + p.salt = base64Encode(unencodedSalt) + hash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return nil, err + } + p.hash = hash + return p, err +} + +func newFromHash(hashedSecret []byte) (*hashed, error) { + if len(hashedSecret) < minHashSize { + return nil, ErrHashTooShort + } + p := new(hashed) + n, err := p.decodeVersion(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + n, err = p.decodeCost(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + + // The "+2" is here because we'll have to append at most 2 '=' to the salt + // when base64 decoding it in expensiveBlowfishSetup(). 
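+	// Layout reminder (follows from the encoded*Size constants above): a
+	// full hash is "$" + major + optional minor + "$" + two-digit cost +
+	// "$" followed by 22 base64 salt characters and 31 base64 hash
+	// characters, so at this point hashedSecret holds exactly the salt
+	// followed by the hash.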
+ p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) + copy(p.salt, hashedSecret[:encodedSaltSize]) + + hashedSecret = hashedSecret[encodedSaltSize:] + p.hash = make([]byte, len(hashedSecret)) + copy(p.hash, hashedSecret) + + return p, nil +} + +func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { + cipherData := make([]byte, len(magicCipherData)) + copy(cipherData, magicCipherData) + + c, err := expensiveBlowfishSetup(password, uint32(cost), salt) + if err != nil { + return nil, err + } + + for i := 0; i < 24; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) + } + } + + // Bug compatibility with C bcrypt implementations. We only encode 23 of + // the 24 bytes encrypted. + hsh := base64Encode(cipherData[:maxCryptedHashSize]) + return hsh, nil +} + +func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { + + csalt, err := base64Decode(salt) + if err != nil { + return nil, err + } + + // Bug compatibility with C bcrypt implementations. They use the trailing + // NULL in the key string during expansion. + ckey := append(key, 0) + + c, err := blowfish.NewSaltedCipher(ckey, csalt) + if err != nil { + return nil, err + } + + var i, rounds uint64 + rounds = 1 << cost + for i = 0; i < rounds; i++ { + blowfish.ExpandKey(ckey, c) + blowfish.ExpandKey(csalt, c) + } + + return c, nil +} + +func (p *hashed) Hash() []byte { + arr := make([]byte, 60) + arr[0] = '$' + arr[1] = p.major + n := 2 + if p.minor != 0 { + arr[2] = p.minor + n = 3 + } + arr[n] = '$' + n += 1 + copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) + n += 2 + arr[n] = '$' + n += 1 + copy(arr[n:], p.salt) + n += encodedSaltSize + copy(arr[n:], p.hash) + n += encodedHashSize + return arr[:n] +} + +func (p *hashed) decodeVersion(sbytes []byte) (int, error) { + if sbytes[0] != '$' { + return -1, InvalidHashPrefixError(sbytes[0]) + } + if sbytes[1] > majorVersion { + return -1, HashVersionTooNewError(sbytes[1]) + } + p.major = sbytes[1] + n := 3 + if sbytes[2] != '$' { + p.minor = sbytes[2] + n++ + } + return n, nil +} + +// sbytes should begin where decodeVersion left off. +func (p *hashed) decodeCost(sbytes []byte) (int, error) { + cost, err := strconv.Atoi(string(sbytes[0:2])) + if err != nil { + return -1, err + } + err = checkCost(cost) + if err != nil { + return -1, err + } + p.cost = cost + return 3, nil +} + +func (p *hashed) String() string { + return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) +} + +func checkCost(cost int) error { + if cost < MinCost || cost > MaxCost { + return InvalidCostError(cost) + } + return nil +} diff --git a/vendor/golang.org/x/crypto/blowfish/LICENSE b/vendor/golang.org/x/crypto/blowfish/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go new file mode 100644 index 00000000..9d80f195 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/block.go @@ -0,0 +1,159 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blowfish + +// getNextWord returns the next big-endian uint32 value from the byte slice +// at the given position in a circular manner, updating the position. +func getNextWord(b []byte, pos *int) uint32 { + var w uint32 + j := *pos + for i := 0; i < 4; i++ { + w = w<<8 | uint32(b[j]) + j++ + if j >= len(b) { + j = 0 + } + } + *pos = j + return w +} + +// ExpandKey performs a key expansion on the given *Cipher. Specifically, it +// performs the Blowfish algorithm's key schedule which sets up the *Cipher's +// pi and substitution tables for calls to Encrypt. This is used, primarily, +// by the bcrypt package to reuse the Blowfish key schedule during its +// set up. It's unlikely that you need to use this directly. +func ExpandKey(key []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + // Using inlined getNextWord for performance. + var d uint32 + for k := 0; k < 4; k++ { + d = d<<8 | uint32(key[j]) + j++ + if j >= len(key) { + j = 0 + } + } + c.p[i] ^= d + } + + var l, r uint32 + for i := 0; i < 18; i += 2 { + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +// This is similar to ExpandKey, but folds the salt during the key +// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero +// salt passed in, reusing ExpandKey turns out to be a place of inefficiency +// and specializing it here is useful. 
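+// The difference is visible below: before each encryptBlock call that
+// refills the P-array and S-boxes, the running l and r values are XORed
+// with the next words of the salt, read cyclically via getNextWord.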
+func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + c.p[i] ^= getNextWord(key, &j) + } + + j = 0 + var l, r uint32 + for i := 0; i < 18; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[0] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] + xr ^= c.p[17] + return xr, xl +} + +func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[17] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] + xr ^= 
((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
+	xr ^= c.p[0]
+	return xr, xl
+}
diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go
new file mode 100644
index 00000000..542984aa
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blowfish/cipher.go
@@ -0,0 +1,91 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
+package blowfish // import "golang.org/x/crypto/blowfish"
+
+// The code is a port of Bruce Schneier's C implementation.
+// See http://www.schneier.com/blowfish.html.
+
+import "strconv"
+
+// The Blowfish block size in bytes.
+const BlockSize = 8
+
+// A Cipher is an instance of Blowfish encryption using a particular key.
+type Cipher struct {
+	p              [18]uint32
+	s0, s1, s2, s3 [256]uint32
+}
+
+type KeySizeError int
+
+func (k KeySizeError) Error() string {
+	return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
+}
+
+// NewCipher creates and returns a Cipher.
+// The key argument should be the Blowfish key, from 1 to 56 bytes.
+func NewCipher(key []byte) (*Cipher, error) {
+	var result Cipher
+	if k := len(key); k < 1 || k > 56 {
+		return nil, KeySizeError(k)
+	}
+	initCipher(&result)
+	ExpandKey(key, &result)
+	return &result, nil
+}
+
+// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
+// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
+// sufficient and desirable. For bcrypt compatibility, the key can be over 56
+// bytes.
+func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
+	if len(salt) == 0 {
+		return NewCipher(key)
+	}
+	var result Cipher
+	if k := len(key); k < 1 {
+		return nil, KeySizeError(k)
+	}
+	initCipher(&result)
+	expandKeyWithSalt(key, salt, &result)
+	return &result, nil
+}
+
+// BlockSize returns the Blowfish block size, 8 bytes.
+// It is necessary to satisfy the Block interface in the
+// package "crypto/cipher".
+func (c *Cipher) BlockSize() int { return BlockSize }
+
+// Encrypt encrypts the 8-byte buffer src using the key k
+// and stores the result in dst.
+// Note that for amounts of data larger than a block,
+// it is not safe to just call Encrypt on successive blocks;
+// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
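+//
+// A minimal single-block sketch (hypothetical key and plaintext, shown for
+// orientation only):
+//
+//	c, err := blowfish.NewCipher([]byte("example key"))
+//	if err != nil {
+//		panic(err)
+//	}
+//	src := []byte("8 bytes!") // exactly one BlockSize block
+//	dst := make([]byte, blowfish.BlockSize)
+//	c.Encrypt(dst, src)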
+func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = encryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +// Decrypt decrypts the 8-byte buffer src using the key k +// and stores the result in dst. +func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = decryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +func initCipher(c *Cipher) { + copy(c.p[0:], p[0:]) + copy(c.s0[0:], s0[0:]) + copy(c.s1[0:], s1[0:]) + copy(c.s2[0:], s2[0:]) + copy(c.s3[0:], s3[0:]) +} diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go new file mode 100644 index 00000000..8c5ee4cb --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/const.go @@ -0,0 +1,199 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The startup permutation array and substitution boxes. +// They are the hexadecimal digits of PI; see: +// http://www.schneier.com/code/constants.txt. + +package blowfish + +var s0 = [256]uint32{ + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, + 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, + 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, + 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, + 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, + 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, + 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, + 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, + 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, + 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, + 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, + 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, + 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, + 0x53317b48, 0x3e00df82, 
0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, + 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, + 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, + 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, + 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, + 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, + 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, + 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, + 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, +} + +var s1 = [256]uint32{ + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, + 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, + 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, + 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, + 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, + 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, + 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, + 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, + 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, + 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, + 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, + 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, + 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, + 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, + 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, + 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 
0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, + 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, + 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, + 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, + 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, + 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, + 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, +} + +var s2 = [256]uint32{ + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, + 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, + 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, + 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, + 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, + 0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, + 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, + 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, + 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, + 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, + 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, + 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, + 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, + 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, + 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, + 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, + 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, + 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, + 
0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, + 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, + 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, + 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, + 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, +} + +var s3 = [256]uint32{ + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, + 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, + 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, + 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, + 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, + 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, + 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, + 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, + 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, + 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, + 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, + 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, + 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, + 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, + 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, + 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, + 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, + 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, + 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, + 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, + 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, + 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, 
+	0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c,
+	0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
+	0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
+}
+
+var p = [18]uint32{
+	0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0,
+	0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
+	0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b,
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/LICENSE b/vendor/golang.org/x/net/http2/hpack/LICENSE
new file mode 100644
index 00000000..6a66aea5
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go
new file mode 100644
index 00000000..f9bb0339
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/encode.go
@@ -0,0 +1,251 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+	"io"
+)
+
+const (
+	uint32Max              = ^uint32(0)
+	initialHeaderTableSize = 4096
+)
+
+type Encoder struct {
+	dynTab dynamicTable
+	// minSize is the minimum table size set by
+	// SetMaxDynamicTableSize after the previous Header Table Size
+	// Update.
+	minSize uint32
+	// maxSizeLimit is the maximum table size this encoder
+	// supports. This will protect the encoder from too large
+	// size.
+	maxSizeLimit uint32
+	// tableSizeUpdate indicates whether "Header Table Size
+	// Update" is required.
+	tableSizeUpdate bool
+	w               io.Writer
+	buf             []byte
+}
+
+// NewEncoder returns a new Encoder which performs HPACK encoding.
+// Encoded data is written to w.
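+//
+// A minimal usage sketch (hypothetical header values):
+//
+//	var buf bytes.Buffer
+//	e := hpack.NewEncoder(&buf)
+//	_ = e.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
+//	_ = e.WriteField(hpack.HeaderField{Name: "user-agent", Value: "demo"})
+//	// buf now holds the encoded header block.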
+func NewEncoder(w io.Writer) *Encoder {
+	e := &Encoder{
+		minSize:         uint32Max,
+		maxSizeLimit:    initialHeaderTableSize,
+		tableSizeUpdate: false,
+		w:               w,
+	}
+	e.dynTab.setMaxSize(initialHeaderTableSize)
+	return e
+}
+
+// WriteField encodes f into a single Write to e's underlying Writer.
+// This function may also produce bytes for "Header Table Size Update"
+// if necessary. If produced, it is done before encoding f.
+func (e *Encoder) WriteField(f HeaderField) error {
+	e.buf = e.buf[:0]
+
+	if e.tableSizeUpdate {
+		e.tableSizeUpdate = false
+		if e.minSize < e.dynTab.maxSize {
+			e.buf = appendTableSize(e.buf, e.minSize)
+		}
+		e.minSize = uint32Max
+		e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
+	}
+
+	idx, nameValueMatch := e.searchTable(f)
+	if nameValueMatch {
+		e.buf = appendIndexed(e.buf, idx)
+	} else {
+		indexing := e.shouldIndex(f)
+		if indexing {
+			e.dynTab.add(f)
+		}
+
+		if idx == 0 {
+			e.buf = appendNewName(e.buf, f, indexing)
+		} else {
+			e.buf = appendIndexedName(e.buf, f, idx, indexing)
+		}
+	}
+	n, err := e.w.Write(e.buf)
+	if err == nil && n != len(e.buf) {
+		err = io.ErrShortWrite
+	}
+	return err
+}
+
+// searchTable searches f in both static and dynamic header tables.
+// The static header table is searched first. Only when there is no
+// exact match for both name and value, the dynamic header table is
+// then searched. If there is no match, i is 0. If both name and value
+// match, i is the matched index and nameValueMatch becomes true. If
+// only name matches, i points to that index and nameValueMatch
+// becomes false.
+func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
+	for idx, hf := range staticTable {
+		if !constantTimeStringCompare(hf.Name, f.Name) {
+			continue
+		}
+		if i == 0 {
+			i = uint64(idx + 1)
+		}
+		if f.Sensitive {
+			continue
+		}
+		if !constantTimeStringCompare(hf.Value, f.Value) {
+			continue
+		}
+		i = uint64(idx + 1)
+		nameValueMatch = true
+		return
+	}
+
+	j, nameValueMatch := e.dynTab.search(f)
+	if nameValueMatch || (i == 0 && j != 0) {
+		i = j + uint64(len(staticTable))
+	}
+	return
+}
+
+// SetMaxDynamicTableSize changes the dynamic header table size to v.
+// The actual size is bounded by the value passed to
+// SetMaxDynamicTableSizeLimit.
+func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
+	if v > e.maxSizeLimit {
+		v = e.maxSizeLimit
+	}
+	if v < e.minSize {
+		e.minSize = v
+	}
+	e.tableSizeUpdate = true
+	e.dynTab.setMaxSize(v)
+}
+
+// SetMaxDynamicTableSizeLimit changes the maximum value that can be
+// specified in SetMaxDynamicTableSize to v. By default, it is set to
+// 4096, which is the same as the default dynamic header table size
+// described in the HPACK specification. If the current maximum
+// dynamic header table size is strictly greater than v, "Header Table
+// Size Update" will be done in the next WriteField call and the
+// maximum dynamic header table size is truncated to v.
+func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
+	e.maxSizeLimit = v
+	if e.dynTab.maxSize > v {
+		e.tableSizeUpdate = true
+		e.dynTab.setMaxSize(v)
+	}
+}
+
+// shouldIndex reports whether f should be indexed.
+func (e *Encoder) shouldIndex(f HeaderField) bool {
+	return !f.Sensitive && f.Size() <= e.dynTab.maxSize
+}
+
+// appendIndexed appends index i, as encoded in "Indexed Header Field"
+// representation, to dst and returns the extended buffer.
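+//
+// For example, static table index 2 (":method: GET") encodes as the single
+// byte 0x82: the representation's 0x80 pattern bit ORed with the 7-bit
+// prefix value 2.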
+func appendIndexed(dst []byte, i uint64) []byte {
+	first := len(dst)
+	dst = appendVarInt(dst, 7, i)
+	dst[first] |= 0x80
+	return dst
+}
+
+// appendNewName appends f, as encoded in one of "Literal Header field
+// - New Name" representation variants, to dst and returns the
+// extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
+	dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
+	dst = appendHpackString(dst, f.Name)
+	return appendHpackString(dst, f.Value)
+}
+
+// appendIndexedName appends f and index i referring indexed name
+// entry, as encoded in one of "Literal Header field - Indexed Name"
+// representation variants, to dst and returns the extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
+	first := len(dst)
+	var n byte
+	if indexing {
+		n = 6
+	} else {
+		n = 4
+	}
+	dst = appendVarInt(dst, n, i)
+	dst[first] |= encodeTypeByte(indexing, f.Sensitive)
+	return appendHpackString(dst, f.Value)
+}
+
+// appendTableSize appends v, as encoded in "Header Table Size Update"
+// representation, to dst and returns the extended buffer.
+func appendTableSize(dst []byte, v uint32) []byte {
+	first := len(dst)
+	dst = appendVarInt(dst, 5, uint64(v))
+	dst[first] |= 0x20
+	return dst
+}
+
+// appendVarInt appends i, as encoded in variable integer form using n
+// bit prefix, to dst and returns the extended buffer.
+//
+// See
+// http://http2.github.io/http2-spec/compression.html#integer.representation
+func appendVarInt(dst []byte, n byte, i uint64) []byte {
+	k := uint64((1 << n) - 1)
+	if i < k {
+		return append(dst, byte(i))
+	}
+	dst = append(dst, byte(k))
+	i -= k
+	for ; i >= 128; i >>= 7 {
+		dst = append(dst, byte(0x80|(i&0x7f)))
+	}
+	return append(dst, byte(i))
+}
+
+// appendHpackString appends s, as encoded in "String Literal"
+// representation, to dst and returns the extended buffer.
+//
+// s will be encoded in Huffman codes only when it produces strictly
+// shorter byte string.
+func appendHpackString(dst []byte, s string) []byte {
+	huffmanLength := HuffmanEncodeLength(s)
+	if huffmanLength < uint64(len(s)) {
+		first := len(dst)
+		dst = appendVarInt(dst, 7, huffmanLength)
+		dst = AppendHuffmanString(dst, s)
+		dst[first] |= 0x80
+	} else {
+		dst = appendVarInt(dst, 7, uint64(len(s)))
+		dst = append(dst, s...)
+	}
+	return dst
+}
+
+// encodeTypeByte returns type byte. If sensitive is true, type byte
+// for "Never Indexed" representation is returned. If sensitive is
+// false and indexing is true, type byte for "Incremental Indexing"
+// representation is returned. Otherwise, type byte for "Without
+// Indexing" is returned.
+func encodeTypeByte(indexing, sensitive bool) byte {
+	if sensitive {
+		return 0x10
+	}
+	if indexing {
+		return 0x40
+	}
+	return 0
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go
new file mode 100644
index 00000000..dcf257af
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/hpack.go
@@ -0,0 +1,542 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hpack implements HPACK, a compression format for
+// efficiently representing HTTP header fields in the context of HTTP/2.
+//
+// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
+package hpack
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+)
+
+// A DecodingError is something the spec defines as a decoding error.
+type DecodingError struct {
+	Err error
+}
+
+func (de DecodingError) Error() string {
+	return fmt.Sprintf("decoding error: %v", de.Err)
+}
+
+// An InvalidIndexError is returned when an encoder references a table
+// entry before the static table or after the end of the dynamic table.
+type InvalidIndexError int
+
+func (e InvalidIndexError) Error() string {
+	return fmt.Sprintf("invalid indexed representation index %d", int(e))
+}
+
+// A HeaderField is a name-value pair. Both the name and value are
+// treated as opaque sequences of octets.
+type HeaderField struct {
+	Name, Value string
+
+	// Sensitive means that this header field should never be
+	// indexed.
+	Sensitive bool
+}
+
+// IsPseudo reports whether the header field is an http2 pseudo header.
+// That is, it reports whether it starts with a colon.
+// It is not otherwise guaranteed to be a valid pseudo header field,
+// though.
+func (hf HeaderField) IsPseudo() bool {
+	return len(hf.Name) != 0 && hf.Name[0] == ':'
+}
+
+func (hf HeaderField) String() string {
+	var suffix string
+	if hf.Sensitive {
+		suffix = " (sensitive)"
+	}
+	return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
+}
+
+// Size returns the size of an entry per RFC 7541 section 4.1.
+func (hf HeaderField) Size() uint32 {
+	// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
+	// "The size of the dynamic table is the sum of the size of
+	// its entries. The size of an entry is the sum of its name's
+	// length in octets (as defined in Section 5.2), its value's
+	// length in octets (see Section 5.2), plus 32. The size of
+	// an entry is calculated using the length of the name and
+	// value without any Huffman encoding applied."
+
+	// This can overflow if somebody makes a large HeaderField
+	// Name and/or Value by hand, but we don't care, because that
+	// won't happen on the wire because the encoding doesn't allow
+	// it.
+	return uint32(len(hf.Name) + len(hf.Value) + 32)
+}
+
+// A Decoder is the decoding context for incremental processing of
+// header blocks.
+type Decoder struct {
+	dynTab dynamicTable
+	emit   func(f HeaderField)
+
+	emitEnabled bool // whether calls to emit are enabled
+	maxStrLen   int  // 0 means unlimited
+
+	// buf is the unparsed buffer. It's only written to
+	// saveBuf if it was truncated in the middle of a header
+	// block. Because it's usually not owned, we can only
+	// process it under Write.
+	buf []byte // not owned; only valid during Write
+
+	// saveBuf is previous data passed to Write which we weren't able
+	// to fully parse before. Unlike buf, we own this data.
+	saveBuf bytes.Buffer
+}
+
+// NewDecoder returns a new decoder with the provided maximum dynamic
+// table size. The emitFunc will be called for each valid field
+// parsed, in the same goroutine as calls to Write, before Write returns.
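+//
+// A minimal usage sketch (made-up input bytes, taken from the static-table
+// indices: 0x82 is ":method: GET", 0x86 ":scheme: http", 0x84 ":path: /"):
+//
+//	d := hpack.NewDecoder(4096, nil)
+//	fields, err := d.DecodeFull([]byte{0x82, 0x86, 0x84})
+//	// fields now holds the three decoded HeaderFields.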
+func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
+	d := &Decoder{
+		emit:        emitFunc,
+		emitEnabled: true,
+	}
+	d.dynTab.allowedMaxSize = maxDynamicTableSize
+	d.dynTab.setMaxSize(maxDynamicTableSize)
+	return d
+}
+
+// ErrStringLength is returned by Decoder.Write when the max string length
+// (as configured by Decoder.SetMaxStringLength) would be violated.
+var ErrStringLength = errors.New("hpack: string too long")
+
+// SetMaxStringLength sets the maximum size of a HeaderField name or
+// value string. If a string exceeds this length (even after any
+// decompression), Write will return ErrStringLength.
+// A value of 0 means unlimited and is the default from NewDecoder.
+func (d *Decoder) SetMaxStringLength(n int) {
+	d.maxStrLen = n
+}
+
+// SetEmitFunc changes the callback used when new header fields
+// are decoded.
+// It must be non-nil. It does not affect EmitEnabled.
+func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
+	d.emit = emitFunc
+}
+
+// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
+// should be called. The default is true.
+//
+// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
+// while still decoding and keeping in-sync with decoder state, but
+// without doing unnecessary decompression or generating unnecessary
+// garbage for header fields past the limit.
+func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }
+
+// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
+// are currently enabled. The default is true.
+func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }
+
+// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
+// underlying buffers for garbage reasons.
+
+func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
+	d.dynTab.setMaxSize(v)
+}
+
+// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
+// stream (via dynamic table size updates) may set the maximum size
+// to.
+func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
+	d.dynTab.allowedMaxSize = v
+}
+
+type dynamicTable struct {
+	// ents is the FIFO described at
+	// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
+	// The newest (low index) is appended at the end, and items are
+	// evicted from the front.
+	ents           []HeaderField
+	size           uint32
+	maxSize        uint32 // current maxSize
+	allowedMaxSize uint32 // maxSize may go up to this, inclusive
+}
+
+func (dt *dynamicTable) setMaxSize(v uint32) {
+	dt.maxSize = v
+	dt.evict()
+}
+
+// TODO: change dynamicTable to be a struct with a slice and a size int field,
+// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
+//
+//
+// Then make add increment the size. maybe the max size should move from Decoder to
+// dynamicTable and add should return an ok bool if there was enough space.
+//
+// Later we'll need a remove operation on dynamicTable.
+
+func (dt *dynamicTable) add(f HeaderField) {
+	dt.ents = append(dt.ents, f)
+	dt.size += f.Size()
+	dt.evict()
+}
+
+// If we're too big, evict old stuff (front of the slice)
+func (dt *dynamicTable) evict() {
+	base := dt.ents // keep base pointer of slice
+	for dt.size > dt.maxSize {
+		dt.size -= dt.ents[0].Size()
+		dt.ents = dt.ents[1:]
+	}
+
+	// Shift slice contents down if we evicted things.
+	if len(dt.ents) != len(base) {
+		copy(base, dt.ents)
+		dt.ents = base[:len(dt.ents)]
+	}
+}
+
+// constantTimeStringCompare compares string a and b in a constant
+// time manner.
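+//
+// (Rationale, inferred from its callers in searchTable and search: table
+// lookups may compare against Sensitive header values, and a constant-time
+// comparison avoids leaking those values through timing.)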
+func constantTimeStringCompare(a, b string) bool {
+	if len(a) != len(b) {
+		return false
+	}
+
+	c := byte(0)
+
+	for i := 0; i < len(a); i++ {
+		c |= a[i] ^ b[i]
+	}
+
+	return c == 0
+}
+
+// search searches f in the table. The return value i is 0 if there is
+// no name match. If there is name match or name/value match, i is the
+// index of that entry (1-based). If both name and value match,
+// nameValueMatch becomes true.
+func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
+	l := len(dt.ents)
+	for j := l - 1; j >= 0; j-- {
+		ent := dt.ents[j]
+		if !constantTimeStringCompare(ent.Name, f.Name) {
+			continue
+		}
+		if i == 0 {
+			i = uint64(l - j)
+		}
+		if f.Sensitive {
+			continue
+		}
+		if !constantTimeStringCompare(ent.Value, f.Value) {
+			continue
+		}
+		i = uint64(l - j)
+		nameValueMatch = true
+		return
+	}
+	return
+}
+
+func (d *Decoder) maxTableIndex() int {
+	return len(d.dynTab.ents) + len(staticTable)
+}
+
+func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
+	if i < 1 {
+		return
+	}
+	if i > uint64(d.maxTableIndex()) {
+		return
+	}
+	if i <= uint64(len(staticTable)) {
+		return staticTable[i-1], true
+	}
+	dents := d.dynTab.ents
+	return dents[len(dents)-(int(i)-len(staticTable))], true
+}
+
+// DecodeFull decodes an entire block.
+//
+// TODO: remove this method and make it incremental later? This is
+// easier for debugging now.
+func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
+	var hf []HeaderField
+	saveFunc := d.emit
+	defer func() { d.emit = saveFunc }()
+	d.emit = func(f HeaderField) { hf = append(hf, f) }
+	if _, err := d.Write(p); err != nil {
+		return nil, err
+	}
+	if err := d.Close(); err != nil {
+		return nil, err
+	}
+	return hf, nil
+}
+
+func (d *Decoder) Close() error {
+	if d.saveBuf.Len() > 0 {
+		d.saveBuf.Reset()
+		return DecodingError{errors.New("truncated headers")}
+	}
+	return nil
+}
+
+func (d *Decoder) Write(p []byte) (n int, err error) {
+	if len(p) == 0 {
+		// Prevent state machine CPU attacks (making us redo
+		// work up to the point of finding out we don't have
+		// enough data)
+		return
+	}
+	// Only copy the data if we have to. Optimistically assume
+	// that p will contain a complete header block.
+	if d.saveBuf.Len() == 0 {
+		d.buf = p
+	} else {
+		d.saveBuf.Write(p)
+		d.buf = d.saveBuf.Bytes()
+		d.saveBuf.Reset()
+	}
+
+	for len(d.buf) > 0 {
+		err = d.parseHeaderFieldRepr()
+		if err == errNeedMore {
+			// Extra paranoia, making sure saveBuf won't
+			// get too large. All the varint and string
+			// reading code earlier should already catch
+			// overlong things and return ErrStringLength,
+			// but keep this as a last resort.
+			const varIntOverhead = 8 // conservative
+			if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
+				return 0, ErrStringLength
+			}
+			d.saveBuf.Write(d.buf)
+			return len(p), nil
+		}
+		if err != nil {
+			break
+		}
+	}
+	return len(p), err
+}
+
+// errNeedMore is an internal sentinel error value that means the
+// buffer is truncated and we need to read more data before we can
+// continue parsing.
+var errNeedMore = errors.New("need more data")
+
+type indexType int
+
+const (
+	indexedTrue indexType = iota
+	indexedFalse
+	indexedNever
+)
+
+func (v indexType) indexed() bool   { return v == indexedTrue }
+func (v indexType) sensitive() bool { return v == indexedNever }
+
+// returns errNeedMore if there isn't enough data available.
+// any other error is fatal.
+// consumes d.buf iff it returns nil.
+// precondition: must be called with len(d.buf) > 0 +func (d *Decoder) parseHeaderFieldRepr() error { + b := d.buf[0] + switch { + case b&128 != 0: + // Indexed representation. + // High bit set? + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 + return d.parseFieldIndexed() + case b&192 == 64: + // 6.2.1 Literal Header Field with Incremental Indexing + // 0b10xxxxxx: top two bits are 10 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 + return d.parseFieldLiteral(6, indexedTrue) + case b&240 == 0: + // 6.2.2 Literal Header Field without Indexing + // 0b0000xxxx: top four bits are 0000 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 + return d.parseFieldLiteral(4, indexedFalse) + case b&240 == 16: + // 6.2.3 Literal Header Field never Indexed + // 0b0001xxxx: top four bits are 0001 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 + return d.parseFieldLiteral(4, indexedNever) + case b&224 == 32: + // 6.3 Dynamic Table Size Update + // Top three bits are '001'. + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3 + return d.parseDynamicTableSizeUpdate() + } + + return DecodingError{errors.New("invalid encoding")} +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseFieldIndexed() error { + buf := d.buf + idx, buf, err := readVarInt(7, buf) + if err != nil { + return err + } + hf, ok := d.at(idx) + if !ok { + return DecodingError{InvalidIndexError(idx)} + } + d.buf = buf + return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value}) +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { + buf := d.buf + nameIdx, buf, err := readVarInt(n, buf) + if err != nil { + return err + } + + var hf HeaderField + wantStr := d.emitEnabled || it.indexed() + if nameIdx > 0 { + ihf, ok := d.at(nameIdx) + if !ok { + return DecodingError{InvalidIndexError(nameIdx)} + } + hf.Name = ihf.Name + } else { + hf.Name, buf, err = d.readString(buf, wantStr) + if err != nil { + return err + } + } + hf.Value, buf, err = d.readString(buf, wantStr) + if err != nil { + return err + } + d.buf = buf + if it.indexed() { + d.dynTab.add(hf) + } + hf.Sensitive = it.sensitive() + return d.callEmit(hf) +} + +func (d *Decoder) callEmit(hf HeaderField) error { + if d.maxStrLen != 0 { + if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen { + return ErrStringLength + } + } + if d.emitEnabled { + d.emit(hf) + } + return nil +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseDynamicTableSizeUpdate() error { + buf := d.buf + size, buf, err := readVarInt(5, buf) + if err != nil { + return err + } + if size > uint64(d.dynTab.allowedMaxSize) { + return DecodingError{errors.New("dynamic table size update too large")} + } + d.dynTab.setMaxSize(uint32(size)) + d.buf = buf + return nil +} + +var errVarintOverflow = DecodingError{errors.New("varint integer overflow")} + +// readVarInt reads an unsigned variable length integer off the +// beginning of p. n is the parameter as described in +// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. +// +// n must always be between 1 and 8. +// +// The returned remain buffer is either a smaller suffix of p, or err != nil. +// The error is errNeedMore if p doesn't contain a complete integer. 
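+//
+// As a worked example (from RFC 7541, Appendix C.1.2, added here for
+// illustration): encoding 1337 with n=5 takes three bytes. The 5-bit
+// prefix saturates at 31, then the remainder 1337-31 = 1306 follows in
+// 7-bit groups, least significant first:
+//
+//	xxx11111  low 5 bits all set (31), so continue reading
+//	10011010  1306 % 128 = 26, high bit set: more bytes follow
+//	00001010  1306 / 128 = 10, high bit clear: done
+//
+// readVarInt(5, p) on those bytes therefore yields i == 1337.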
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+	if n < 1 || n > 8 {
+		panic("bad n")
+	}
+	if len(p) == 0 {
+		return 0, p, errNeedMore
+	}
+	i = uint64(p[0])
+	if n < 8 {
+		i &= (1 << uint64(n)) - 1
+	}
+	if i < (1<<uint64(n))-1 {
+		return i, p[1:], nil
+	}
+
+	origP := p
+	p = p[1:]
+	var m uint64
+	for len(p) > 0 {
+		b := p[0]
+		p = p[1:]
+		i += uint64(b&127) << m
+		if b&128 == 0 {
+			return i, p, nil
+		}
+		m += 7
+		if m >= 63 { // TODO: proper overflow check. making this up.
+			return 0, origP, errVarintOverflow
+		}
+	}
+	return 0, origP, errNeedMore
+}
+
+// readString decodes an hpack string from p.
+//
+// wantStr is whether s will be used. If false, decompression and
+// []byte->string garbage are skipped if s will be ignored
+// anyway. This does mean that huffman decoding errors for non-indexed
+// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
+// is returning an error anyway, and because they're not indexed, the error
+// won't affect the decoding state.
+func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
+	if len(p) == 0 {
+		return "", p, errNeedMore
+	}
+	isHuff := p[0]&128 != 0
+	strLen, p, err := readVarInt(7, p)
+	if err != nil {
+		return "", p, err
+	}
+	if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
+		return "", nil, ErrStringLength
+	}
+	if uint64(len(p)) < strLen {
+		return "", p, errNeedMore
+	}
+	if !isHuff {
+		if wantStr {
+			s = string(p[:strLen])
+		}
+		return s, p[strLen:], nil
+	}
+
+	if wantStr {
+		buf := bufPool.Get().(*bytes.Buffer)
+		buf.Reset() // don't trust others
+		defer bufPool.Put(buf)
+		if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
+			buf.Reset()
+			return "", nil, err
+		}
+		s = buf.String()
+		buf.Reset() // be nice to GC
+	}
+	return s, p[strLen:], nil
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go
new file mode 100644
index 00000000..eb4b1f05
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/huffman.go
@@ -0,0 +1,190 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"sync"
+)
+
+var bufPool = sync.Pool{
+	New: func() interface{} { return new(bytes.Buffer) },
+}
+
+// HuffmanDecode decodes the string in v and writes the expanded
+// result to w, returning the number of bytes written to w and the
+// Write call's return value. At most one Write call is made.
+func HuffmanDecode(w io.Writer, v []byte) (int, error) {
+	buf := bufPool.Get().(*bytes.Buffer)
+	buf.Reset()
+	defer bufPool.Put(buf)
+	if err := huffmanDecode(buf, 0, v); err != nil {
+		return 0, err
+	}
+	return w.Write(buf.Bytes())
+}
+
+// HuffmanDecodeToString decodes the string in v.
+func HuffmanDecodeToString(v []byte) (string, error) {
+	buf := bufPool.Get().(*bytes.Buffer)
+	buf.Reset()
+	defer bufPool.Put(buf)
+	if err := huffmanDecode(buf, 0, v); err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
+
+// ErrInvalidHuffman is returned for errors found decoding
+// Huffman-encoded strings.
+var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
+
+// huffmanDecode decodes v to buf.
+// If maxLen is greater than 0, attempts to write more to buf than
+// maxLen bytes will return ErrStringLength.
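+//
+// An illustrative test vector (RFC 7541, Appendix C.4.1): the Huffman
+// encoding of "www.example.com" is the 12 bytes
+//
+//	f1e3 c2e5 f23a 6ba0 ab90 f4ff
+//
+// which decodes back to the original 15-byte string.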
+func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
+	n := rootHuffmanNode
+	cur, nbits := uint(0), uint8(0)
+	for _, b := range v {
+		cur = cur<<8 | uint(b)
+		nbits += 8
+		for nbits >= 8 {
+			idx := byte(cur >> (nbits - 8))
+			n = n.children[idx]
+			if n == nil {
+				return ErrInvalidHuffman
+			}
+			if n.children == nil {
+				if maxLen != 0 && buf.Len() == maxLen {
+					return ErrStringLength
+				}
+				buf.WriteByte(n.sym)
+				nbits -= n.codeLen
+				n = rootHuffmanNode
+			} else {
+				nbits -= 8
+			}
+		}
+	}
+	for nbits > 0 {
+		n = n.children[byte(cur<<(8-nbits))]
+		if n.children != nil || n.codeLen > nbits {
+			break
+		}
+		buf.WriteByte(n.sym)
+		nbits -= n.codeLen
+		n = rootHuffmanNode
+	}
+	return nil
+}
+
+type node struct {
+	// children is non-nil for internal nodes
+	children []*node
+
+	// The following are only valid if children is nil:
+	codeLen uint8 // number of bits that led to the output of sym
+	sym     byte  // output symbol
+}
+
+func newInternalNode() *node {
+	return &node{children: make([]*node, 256)}
+}
+
+var rootHuffmanNode = newInternalNode()
+
+func init() {
+	if len(huffmanCodes) != 256 {
+		panic("unexpected size")
+	}
+	for i, code := range huffmanCodes {
+		addDecoderNode(byte(i), code, huffmanCodeLen[i])
+	}
+}
+
+func addDecoderNode(sym byte, code uint32, codeLen uint8) {
+	cur := rootHuffmanNode
+	for codeLen > 8 {
+		codeLen -= 8
+		i := uint8(code >> codeLen)
+		if cur.children[i] == nil {
+			cur.children[i] = newInternalNode()
+		}
+		cur = cur.children[i]
+	}
+	shift := 8 - codeLen
+	start, end := int(uint8(code<<shift)), 1<<shift
+	for i := start; i < start+end; i++ {
+		cur.children[i] = &node{sym: sym, codeLen: codeLen}
+	}
+}
+
+// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
+// and returns the extended buffer.
+func AppendHuffmanString(dst []byte, s string) []byte {
+	rembits := uint8(8)
+
+	for i := 0; i < len(s); i++ {
+		if rembits == 8 {
+			dst = append(dst, 0)
+		}
+		dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
+	}
+
+	if rembits < 8 {
+		// special EOS symbol
+		code := uint32(0x3fffffff)
+		nbits := uint8(30)
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+	}
+
+	return dst
+}
+
+// HuffmanEncodeLength returns the number of bytes required to encode
+// s in Huffman codes. The result is rounded up to a byte boundary.
+func HuffmanEncodeLength(s string) uint64 {
+	n := uint64(0)
+	for i := 0; i < len(s); i++ {
+		n += uint64(huffmanCodeLen[s[i]])
+	}
+	return (n + 7) / 8
+}
+
+// appendByteToHuffmanCode appends the Huffman code for c to dst and
+// returns the extended buffer and the remaining bits in the last
+// element. The appending is not byte aligned and the remaining bits
+// in the last element of dst are given in rembits.
+func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
+	code := huffmanCodes[c]
+	nbits := huffmanCodeLen[c]
+
+	for {
+		if rembits > nbits {
+			t := uint8(code << (rembits - nbits))
+			dst[len(dst)-1] |= t
+			rembits -= nbits
+			break
+		}
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+
+		nbits -= rembits
+		rembits = 8
+
+		if nbits == 0 {
+			break
+		}
+
+		dst = append(dst, 0)
+	}
+
+	return dst, rembits
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
new file mode 100644
index 00000000..b9283a02
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/tables.go
@@ -0,0 +1,352 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package hpack + +func pair(name, value string) HeaderField { + return HeaderField{Name: name, Value: value} +} + +// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B +var staticTable = [...]HeaderField{ + pair(":authority", ""), // index 1 (1-based) + pair(":method", "GET"), + pair(":method", "POST"), + pair(":path", "/"), + pair(":path", "/index.html"), + pair(":scheme", "http"), + pair(":scheme", "https"), + pair(":status", "200"), + pair(":status", "204"), + pair(":status", "206"), + pair(":status", "304"), + pair(":status", "400"), + pair(":status", "404"), + pair(":status", "500"), + pair("accept-charset", ""), + pair("accept-encoding", "gzip, deflate"), + pair("accept-language", ""), + pair("accept-ranges", ""), + pair("accept", ""), + pair("access-control-allow-origin", ""), + pair("age", ""), + pair("allow", ""), + pair("authorization", ""), + pair("cache-control", ""), + pair("content-disposition", ""), + pair("content-encoding", ""), + pair("content-language", ""), + pair("content-length", ""), + pair("content-location", ""), + pair("content-range", ""), + pair("content-type", ""), + pair("cookie", ""), + pair("date", ""), + pair("etag", ""), + pair("expect", ""), + pair("expires", ""), + pair("from", ""), + pair("host", ""), + pair("if-match", ""), + pair("if-modified-since", ""), + pair("if-none-match", ""), + pair("if-range", ""), + pair("if-unmodified-since", ""), + pair("last-modified", ""), + pair("link", ""), + pair("location", ""), + pair("max-forwards", ""), + pair("proxy-authenticate", ""), + pair("proxy-authorization", ""), + pair("range", ""), + pair("referer", ""), + pair("refresh", ""), + pair("retry-after", ""), + pair("server", ""), + pair("set-cookie", ""), + pair("strict-transport-security", ""), + pair("transfer-encoding", ""), + pair("user-agent", ""), + pair("vary", ""), + pair("via", ""), + pair("www-authenticate", ""), +} + +var huffmanCodes = [256]uint32{ + 0x1ff8, + 0x7fffd8, + 0xfffffe2, + 0xfffffe3, + 0xfffffe4, + 0xfffffe5, + 0xfffffe6, + 0xfffffe7, + 0xfffffe8, + 0xffffea, + 0x3ffffffc, + 0xfffffe9, + 0xfffffea, + 0x3ffffffd, + 0xfffffeb, + 0xfffffec, + 0xfffffed, + 0xfffffee, + 0xfffffef, + 0xffffff0, + 0xffffff1, + 0xffffff2, + 0x3ffffffe, + 0xffffff3, + 0xffffff4, + 0xffffff5, + 0xffffff6, + 0xffffff7, + 0xffffff8, + 0xffffff9, + 0xffffffa, + 0xffffffb, + 0x14, + 0x3f8, + 0x3f9, + 0xffa, + 0x1ff9, + 0x15, + 0xf8, + 0x7fa, + 0x3fa, + 0x3fb, + 0xf9, + 0x7fb, + 0xfa, + 0x16, + 0x17, + 0x18, + 0x0, + 0x1, + 0x2, + 0x19, + 0x1a, + 0x1b, + 0x1c, + 0x1d, + 0x1e, + 0x1f, + 0x5c, + 0xfb, + 0x7ffc, + 0x20, + 0xffb, + 0x3fc, + 0x1ffa, + 0x21, + 0x5d, + 0x5e, + 0x5f, + 0x60, + 0x61, + 0x62, + 0x63, + 0x64, + 0x65, + 0x66, + 0x67, + 0x68, + 0x69, + 0x6a, + 0x6b, + 0x6c, + 0x6d, + 0x6e, + 0x6f, + 0x70, + 0x71, + 0x72, + 0xfc, + 0x73, + 0xfd, + 0x1ffb, + 0x7fff0, + 0x1ffc, + 0x3ffc, + 0x22, + 0x7ffd, + 0x3, + 0x23, + 0x4, + 0x24, + 0x5, + 0x25, + 0x26, + 0x27, + 0x6, + 0x74, + 0x75, + 0x28, + 0x29, + 0x2a, + 0x7, + 0x2b, + 0x76, + 0x2c, + 0x8, + 0x9, + 0x2d, + 0x77, + 0x78, + 0x79, + 0x7a, + 0x7b, + 0x7ffe, + 0x7fc, + 0x3ffd, + 0x1ffd, + 0xffffffc, + 0xfffe6, + 0x3fffd2, + 0xfffe7, + 0xfffe8, + 0x3fffd3, + 0x3fffd4, + 0x3fffd5, + 0x7fffd9, + 0x3fffd6, + 0x7fffda, + 0x7fffdb, + 0x7fffdc, + 0x7fffdd, + 0x7fffde, + 0xffffeb, + 0x7fffdf, + 0xffffec, + 0xffffed, + 0x3fffd7, + 0x7fffe0, + 0xffffee, + 0x7fffe1, + 0x7fffe2, + 0x7fffe3, + 0x7fffe4, + 0x1fffdc, + 0x3fffd8, + 0x7fffe5, + 0x3fffd9, + 0x7fffe6, + 0x7fffe7, + 0xffffef, + 
0x3fffda, + 0x1fffdd, + 0xfffe9, + 0x3fffdb, + 0x3fffdc, + 0x7fffe8, + 0x7fffe9, + 0x1fffde, + 0x7fffea, + 0x3fffdd, + 0x3fffde, + 0xfffff0, + 0x1fffdf, + 0x3fffdf, + 0x7fffeb, + 0x7fffec, + 0x1fffe0, + 0x1fffe1, + 0x3fffe0, + 0x1fffe2, + 0x7fffed, + 0x3fffe1, + 0x7fffee, + 0x7fffef, + 0xfffea, + 0x3fffe2, + 0x3fffe3, + 0x3fffe4, + 0x7ffff0, + 0x3fffe5, + 0x3fffe6, + 0x7ffff1, + 0x3ffffe0, + 0x3ffffe1, + 0xfffeb, + 0x7fff1, + 0x3fffe7, + 0x7ffff2, + 0x3fffe8, + 0x1ffffec, + 0x3ffffe2, + 0x3ffffe3, + 0x3ffffe4, + 0x7ffffde, + 0x7ffffdf, + 0x3ffffe5, + 0xfffff1, + 0x1ffffed, + 0x7fff2, + 0x1fffe3, + 0x3ffffe6, + 0x7ffffe0, + 0x7ffffe1, + 0x3ffffe7, + 0x7ffffe2, + 0xfffff2, + 0x1fffe4, + 0x1fffe5, + 0x3ffffe8, + 0x3ffffe9, + 0xffffffd, + 0x7ffffe3, + 0x7ffffe4, + 0x7ffffe5, + 0xfffec, + 0xfffff3, + 0xfffed, + 0x1fffe6, + 0x3fffe9, + 0x1fffe7, + 0x1fffe8, + 0x7ffff3, + 0x3fffea, + 0x3fffeb, + 0x1ffffee, + 0x1ffffef, + 0xfffff4, + 0xfffff5, + 0x3ffffea, + 0x7ffff4, + 0x3ffffeb, + 0x7ffffe6, + 0x3ffffec, + 0x3ffffed, + 0x7ffffe7, + 0x7ffffe8, + 0x7ffffe9, + 0x7ffffea, + 0x7ffffeb, + 0xffffffe, + 0x7ffffec, + 0x7ffffed, + 0x7ffffee, + 0x7ffffef, + 0x7fffff0, + 0x3ffffee, +} + +var huffmanCodeLen = [256]uint8{ + 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, + 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, + 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, + 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, + 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, + 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, + 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, + 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, + 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, + 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, + 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, + 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, + 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, + 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, +} diff --git a/vendor/gopkg.in/gcfg.v1/LICENSE b/vendor/gopkg.in/gcfg.v1/LICENSE new file mode 100644 index 00000000..87a5cede --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/gcfg.v1/doc.go b/vendor/gopkg.in/gcfg.v1/doc.go
new file mode 100644
index 00000000..0e198e36
--- /dev/null
+++ b/vendor/gopkg.in/gcfg.v1/doc.go
@@ -0,0 +1,118 @@
+// Package gcfg reads "INI-style" text-based configuration files with
+// "name=value" pairs grouped into sections (gcfg files).
+//
+// This package is still a work in progress; see the sections below for planned
+// changes.
+//
+// Syntax
+//
+// The syntax is based on that used by git config:
+// http://git-scm.com/docs/git-config#_syntax .
+// There are some (planned) differences compared to the git config format:
+//  - improve data portability:
+//    - must be encoded in UTF-8 (for now) and must not contain the 0 byte
+//    - the include directive and the "path" type are not supported
+//      (path type may be implementable as a user-defined type)
+//  - internationalization
+//    - section and variable names can contain unicode letters, unicode digits
+//      (as defined in http://golang.org/ref/spec#Characters ) and hyphens
+//      (U+002D), starting with a unicode letter
+//  - disallow potentially ambiguous or misleading definitions:
+//    - `[sec.sub]` format is not allowed (deprecated in gitconfig)
+//    - `[sec ""]` is not allowed
+//      - use `[sec]` for section name "sec" and empty subsection name
+//    - (planned) within a single file, definitions must be contiguous for each:
+//      - section: '[secA]' -> '[secB]' -> '[secA]' is an error
+//      - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error
+//      - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error
+//
+// Data structure
+//
+// The functions in this package read values into a user-defined struct.
+// Each section corresponds to a struct field in the config struct, and each
+// variable in a section corresponds to a data field in the section struct.
+// The mapping of each section or variable name to fields is done either based
+// on the "gcfg" struct tag or by matching the name of the section or variable,
+// ignoring case. In the latter case, hyphens '-' in section and variable names
+// correspond to underscores '_' in field names.
+// Fields must be exported; to use a section or variable name starting with a
+// letter that is neither upper- nor lower-case, prefix the field name with 'X'.
+// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .)
+//
+// For sections with subsections, the corresponding field in config must be a
+// map, rather than a struct, with string keys and pointer-to-struct values.
+// Values for subsection variables are stored in the map with the subsection
+// name used as the map key.
+// (Note that unlike section and variable names, subsection names are case
+// sensitive.)
+// When using a map, and there is a section with the same section name but
+// without a subsection name, its values are stored with the empty string used
+// as the key.
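+//
+// As an illustrative sketch (the names here are made up for this example,
+// not part of the original documentation), a file such as
+//
+//	[profile "alice"]
+//	name = Alice
+//
+// can be decoded into
+//
+//	type Config struct {
+//		Profile map[string]*struct{ Name string }
+//	}
+//
+// so that, once read into a value cfg of type Config,
+// cfg.Profile["alice"].Name == "Alice".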
+//
+// The functions in this package panic if config is not a pointer to a struct,
+// or when a field is not of a suitable type (either a struct or a map with
+// string keys and pointer-to-struct values).
+//
+// Parsing of values
+//
+// The section structs in the config struct may contain single-valued or
+// multi-valued variables. Variables of unnamed slice type (that is, a type
+// starting with `[]`) are treated as multi-valued; all others (including named
+// slice types) are treated as single-valued variables.
+//
+// Single-valued variables are handled based on the type as follows.
+// Unnamed pointer types (that is, types starting with `*`) are dereferenced,
+// and if necessary, a new instance is allocated.
+//
+// For types implementing the encoding.TextUnmarshaler interface, the
+// UnmarshalText method is used to set the value. Implementing this method is
+// the recommended way for parsing user-defined types.
+//
+// For fields of string kind, the value string is assigned to the field, after
+// unquoting and unescaping as needed.
+// For fields of bool kind, the field is set to true if the value is "true",
+// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or
+// "0", ignoring case. In addition, single-valued bool fields can be specified
+// with a "blank" value (variable name without equals sign and value); in such
+// case the value is set to true.
+//
+// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as
+// decimal or hexadecimal (with a '0x' prefix). (This is to prevent
+// unintuitively handling zero-padded numbers as octal.) Other types having
+// [u]int* as the underlying type, such as os.FileMode and uintptr, allow
+// decimal, hexadecimal, or octal values.
+// Parsing mode for integer types can be overridden using the struct tag option
+// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters
+// (each standing for decimal, hexadecimal, and octal, respectively).
+//
+// All other types are parsed using fmt.Sscanf with the "%v" verb.
+//
+// For multi-valued variables, each individual value is parsed as above and
+// appended to the slice. If the first value is specified as a "blank" value
+// (variable name without equals sign and value), a new slice is allocated;
+// that is, any values previously set in the slice will be ignored.
+//
+// The types subpackage provides helpers for parsing "enum-like" and integer
+// types.
+//
+// TODO
+//
+// The following is a list of changes under consideration:
+//  - documentation
+//    - self-contained syntax documentation
+//    - more practical examples
+//    - move TODOs to issue tracker (eventually)
+//  - syntax
+//    - reconsider valid escape sequences
+//      (gitconfig doesn't support \r in value, \t in subsection name, etc.)
+//  - reading / parsing gcfg files
+//    - define internal representation structure
+//    - support multiple inputs (readers, strings, files)
+//    - support declaring encoding (?)
+//    - support varying fields sets for subsections (?)
+//  - writing gcfg files
+//  - error handling
+//    - make error context accessible programmatically?
+//    - limit input size?
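+//
+// Example
+//
+// A minimal usage sketch (illustrative, reusing the hypothetical Config
+// struct above):
+//
+//	var cfg Config
+//	err := gcfg.ReadStringInto(&cfg, "[profile \"alice\"]\nname = Alice\n")
+//	if err != nil {
+//		// handle parse error
+//	}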
+// +package gcfg // import "gopkg.in/gcfg.v1" diff --git a/vendor/gopkg.in/gcfg.v1/go1_0.go b/vendor/gopkg.in/gcfg.v1/go1_0.go new file mode 100644 index 00000000..66702107 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/go1_0.go @@ -0,0 +1,7 @@ +// +build !go1.2 + +package gcfg + +type textUnmarshaler interface { + UnmarshalText(text []byte) error +} diff --git a/vendor/gopkg.in/gcfg.v1/go1_2.go b/vendor/gopkg.in/gcfg.v1/go1_2.go new file mode 100644 index 00000000..6f5843bc --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/go1_2.go @@ -0,0 +1,9 @@ +// +build go1.2 + +package gcfg + +import ( + "encoding" +) + +type textUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/gopkg.in/gcfg.v1/read.go b/vendor/gopkg.in/gcfg.v1/read.go new file mode 100644 index 00000000..fdfb5f3a --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/read.go @@ -0,0 +1,188 @@ +package gcfg + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "strings" +) + +import ( + "gopkg.in/gcfg.v1/scanner" + "gopkg.in/gcfg.v1/token" +) + +var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t'} + +// no error: invalid literals should be caught by scanner +func unquote(s string) string { + u, q, esc := make([]rune, 0, len(s)), false, false + for _, c := range s { + if esc { + uc, ok := unescape[c] + switch { + case ok: + u = append(u, uc) + fallthrough + case !q && c == '\n': + esc = false + continue + } + panic("invalid escape sequence") + } + switch c { + case '"': + q = !q + case '\\': + esc = true + default: + u = append(u, c) + } + } + if q { + panic("missing end quote") + } + if esc { + panic("invalid escape sequence") + } + return string(u) +} + +func readInto(config interface{}, fset *token.FileSet, file *token.File, src []byte) error { + var s scanner.Scanner + var errs scanner.ErrorList + s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0) + sect, sectsub := "", "" + pos, tok, lit := s.Scan() + errfn := func(msg string) error { + return fmt.Errorf("%s: %s", fset.Position(pos), msg) + } + for { + if errs.Len() > 0 { + return errs.Err() + } + switch tok { + case token.EOF: + return nil + case token.EOL, token.COMMENT: + pos, tok, lit = s.Scan() + case token.LBRACK: + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + return errs.Err() + } + if tok != token.IDENT { + return errfn("expected section name") + } + sect, sectsub = lit, "" + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + return errs.Err() + } + if tok == token.STRING { + sectsub = unquote(lit) + if sectsub == "" { + return errfn("empty subsection name") + } + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + return errs.Err() + } + } + if tok != token.RBRACK { + if sectsub == "" { + return errfn("expected subsection name or right bracket") + } + return errfn("expected right bracket") + } + pos, tok, lit = s.Scan() + if tok != token.EOL && tok != token.EOF && tok != token.COMMENT { + return errfn("expected EOL, EOF, or comment") + } + // If a section/subsection header was found, ensure a + // container object is created, even if there are no + // variables further down. 
+			err := set(config, sect, sectsub, "", true, "")
+			if err != nil {
+				return err
+			}
+		case token.IDENT:
+			if sect == "" {
+				return errfn("expected section header")
+			}
+			n := lit
+			pos, tok, lit = s.Scan()
+			if errs.Len() > 0 {
+				return errs.Err()
+			}
+			blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, ""
+			if !blank {
+				if tok != token.ASSIGN {
+					return errfn("expected '='")
+				}
+				pos, tok, lit = s.Scan()
+				if errs.Len() > 0 {
+					return errs.Err()
+				}
+				if tok != token.STRING {
+					return errfn("expected value")
+				}
+				v = unquote(lit)
+				pos, tok, lit = s.Scan()
+				if errs.Len() > 0 {
+					return errs.Err()
+				}
+				if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
+					return errfn("expected EOL, EOF, or comment")
+				}
+			}
+			err := set(config, sect, sectsub, n, blank, v)
+			if err != nil {
+				return err
+			}
+		default:
+			if sect == "" {
+				return errfn("expected section header")
+			}
+			return errfn("expected section header or variable declaration")
+		}
+	}
+	panic("never reached")
+}
+
+// ReadInto reads gcfg formatted data from reader and sets the values into the
+// corresponding fields in config.
+func ReadInto(config interface{}, reader io.Reader) error {
+	src, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return err
+	}
+	fset := token.NewFileSet()
+	file := fset.AddFile("", fset.Base(), len(src))
+	return readInto(config, fset, file, src)
+}
+
+// ReadStringInto reads gcfg formatted data from str and sets the values into
+// the corresponding fields in config.
+func ReadStringInto(config interface{}, str string) error {
+	r := strings.NewReader(str)
+	return ReadInto(config, r)
+}
+
+// ReadFileInto reads gcfg formatted data from the file filename and sets the
+// values into the corresponding fields in config.
+func ReadFileInto(config interface{}, filename string) error {
+	f, err := os.Open(filename)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	src, err := ioutil.ReadAll(f)
+	if err != nil {
+		return err
+	}
+	fset := token.NewFileSet()
+	file := fset.AddFile(filename, fset.Base(), len(src))
+	return readInto(config, fset, file, src)
+}
diff --git a/vendor/gopkg.in/gcfg.v1/scanner/errors.go b/vendor/gopkg.in/gcfg.v1/scanner/errors.go
new file mode 100644
index 00000000..1a3c0f65
--- /dev/null
+++ b/vendor/gopkg.in/gcfg.v1/scanner/errors.go
@@ -0,0 +1,121 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scanner
+
+import (
+	"fmt"
+	"io"
+	"sort"
+)
+
+import (
+	"gopkg.in/gcfg.v1/token"
+)
+
+// In an ErrorList, an error is represented by an *Error.
+// The position Pos, if valid, points to the beginning of
+// the offending token, and the error condition is described
+// by Msg.
+//
+type Error struct {
+	Pos token.Position
+	Msg string
+}
+
+// Error implements the error interface.
+func (e Error) Error() string {
+	if e.Pos.Filename != "" || e.Pos.IsValid() {
+		// don't print "<unknown position>"
+		// TODO(gri) reconsider the semantics of Position.IsValid
+		return e.Pos.String() + ": " + e.Msg
+	}
+	return e.Msg
+}
+
+// ErrorList is a list of *Errors.
+// The zero value for an ErrorList is an empty ErrorList ready to use.
+//
+type ErrorList []*Error
+
+// Add adds an Error with given position and error message to an ErrorList.
+func (p *ErrorList) Add(pos token.Position, msg string) {
+	*p = append(*p, &Error{pos, msg})
+}
+
+// Reset resets an ErrorList to no errors.
+func (p *ErrorList) Reset() { *p = (*p)[0:0] } + +// ErrorList implements the sort Interface. +func (p ErrorList) Len() int { return len(p) } +func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p ErrorList) Less(i, j int) bool { + e := &p[i].Pos + f := &p[j].Pos + if e.Filename < f.Filename { + return true + } + if e.Filename == f.Filename { + return e.Offset < f.Offset + } + return false +} + +// Sort sorts an ErrorList. *Error entries are sorted by position, +// other errors are sorted by error message, and before any *Error +// entry. +// +func (p ErrorList) Sort() { + sort.Sort(p) +} + +// RemoveMultiples sorts an ErrorList and removes all but the first error per line. +func (p *ErrorList) RemoveMultiples() { + sort.Sort(p) + var last token.Position // initial last.Line is != any legal error line + i := 0 + for _, e := range *p { + if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line { + last = e.Pos + (*p)[i] = e + i++ + } + } + (*p) = (*p)[0:i] +} + +// An ErrorList implements the error interface. +func (p ErrorList) Error() string { + switch len(p) { + case 0: + return "no errors" + case 1: + return p[0].Error() + } + return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1) +} + +// Err returns an error equivalent to this error list. +// If the list is empty, Err returns nil. +func (p ErrorList) Err() error { + if len(p) == 0 { + return nil + } + return p +} + +// PrintError is a utility function that prints a list of errors to w, +// one error per line, if the err parameter is an ErrorList. Otherwise +// it prints the err string. +// +func PrintError(w io.Writer, err error) { + if list, ok := err.(ErrorList); ok { + for _, e := range list { + fmt.Fprintf(w, "%s\n", e) + } + } else if err != nil { + fmt.Fprintf(w, "%s\n", err) + } +} diff --git a/vendor/gopkg.in/gcfg.v1/scanner/scanner.go b/vendor/gopkg.in/gcfg.v1/scanner/scanner.go new file mode 100644 index 00000000..bbbdbf53 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/scanner/scanner.go @@ -0,0 +1,342 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scanner implements a scanner for gcfg configuration text. +// It takes a []byte as source which can then be tokenized +// through repeated calls to the Scan method. +// +// Note that the API for the scanner package may change to accommodate new +// features or implementation changes in gcfg. +// +package scanner + +import ( + "fmt" + "path/filepath" + "unicode" + "unicode/utf8" +) + +import ( + "gopkg.in/gcfg.v1/token" +) + +// An ErrorHandler may be provided to Scanner.Init. If a syntax error is +// encountered and a handler was installed, the handler is called with a +// position and an error message. The position points to the beginning of +// the offending token. +// +type ErrorHandler func(pos token.Position, msg string) + +// A Scanner holds the scanner's internal state while processing +// a given text. It can be allocated as part of another data +// structure but must be initialized via Init before use. 
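+//
+// A typical scanning loop looks roughly like this (an illustrative
+// sketch, not from the original sources):
+//
+//	fset := token.NewFileSet()
+//	file := fset.AddFile("example.gcfg", fset.Base(), len(src))
+//	var s Scanner
+//	s.Init(file, src, nil, 0)
+//	for {
+//		pos, tok, lit := s.Scan()
+//		if tok == token.EOF {
+//			break
+//		}
+//		fmt.Println(fset.Position(pos), tok, lit)
+//	}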
+// +type Scanner struct { + // immutable state + file *token.File // source file handle + dir string // directory portion of file.Name() + src []byte // source + err ErrorHandler // error reporting; or nil + mode Mode // scanning mode + + // scanning state + ch rune // current character + offset int // character offset + rdOffset int // reading offset (position after current character) + lineOffset int // current line offset + nextVal bool // next token is expected to be a value + + // public state - ok to modify + ErrorCount int // number of errors encountered +} + +// Read the next Unicode char into s.ch. +// s.ch < 0 means end-of-file. +// +func (s *Scanner) next() { + if s.rdOffset < len(s.src) { + s.offset = s.rdOffset + if s.ch == '\n' { + s.lineOffset = s.offset + s.file.AddLine(s.offset) + } + r, w := rune(s.src[s.rdOffset]), 1 + switch { + case r == 0: + s.error(s.offset, "illegal character NUL") + case r >= 0x80: + // not ASCII + r, w = utf8.DecodeRune(s.src[s.rdOffset:]) + if r == utf8.RuneError && w == 1 { + s.error(s.offset, "illegal UTF-8 encoding") + } + } + s.rdOffset += w + s.ch = r + } else { + s.offset = len(s.src) + if s.ch == '\n' { + s.lineOffset = s.offset + s.file.AddLine(s.offset) + } + s.ch = -1 // eof + } +} + +// A mode value is a set of flags (or 0). +// They control scanner behavior. +// +type Mode uint + +const ( + ScanComments Mode = 1 << iota // return comments as COMMENT tokens +) + +// Init prepares the scanner s to tokenize the text src by setting the +// scanner at the beginning of src. The scanner uses the file set file +// for position information and it adds line information for each line. +// It is ok to re-use the same file when re-scanning the same file as +// line information which is already present is ignored. Init causes a +// panic if the file size does not match the src size. +// +// Calls to Scan will invoke the error handler err if they encounter a +// syntax error and err is not nil. Also, for each error encountered, +// the Scanner field ErrorCount is incremented by one. The mode parameter +// determines how comments are handled. +// +// Note that Init may call err if there is an error in the first character +// of the file. +// +func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) { + // Explicitly initialize all fields since a scanner may be reused. 
+ if file.Size() != len(src) { + panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src))) + } + s.file = file + s.dir, _ = filepath.Split(file.Name()) + s.src = src + s.err = err + s.mode = mode + + s.ch = ' ' + s.offset = 0 + s.rdOffset = 0 + s.lineOffset = 0 + s.ErrorCount = 0 + s.nextVal = false + + s.next() +} + +func (s *Scanner) error(offs int, msg string) { + if s.err != nil { + s.err(s.file.Position(s.file.Pos(offs)), msg) + } + s.ErrorCount++ +} + +func (s *Scanner) scanComment() string { + // initial [;#] already consumed + offs := s.offset - 1 // position of initial [;#] + + for s.ch != '\n' && s.ch >= 0 { + s.next() + } + return string(s.src[offs:s.offset]) +} + +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch) +} + +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +func (s *Scanner) scanIdentifier() string { + offs := s.offset + for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' { + s.next() + } + return string(s.src[offs:s.offset]) +} + +func (s *Scanner) scanEscape(val bool) { + offs := s.offset + ch := s.ch + s.next() // always make progress + switch ch { + case '\\', '"': + // ok + case 'n', 't': + if val { + break // ok + } + fallthrough + default: + s.error(offs, "unknown escape sequence") + } +} + +func (s *Scanner) scanString() string { + // '"' opening already consumed + offs := s.offset - 1 + + for s.ch != '"' { + ch := s.ch + s.next() + if ch == '\n' || ch < 0 { + s.error(offs, "string not terminated") + break + } + if ch == '\\' { + s.scanEscape(false) + } + } + + s.next() + + return string(s.src[offs:s.offset]) +} + +func stripCR(b []byte) []byte { + c := make([]byte, len(b)) + i := 0 + for _, ch := range b { + if ch != '\r' { + c[i] = ch + i++ + } + } + return c[:i] +} + +func (s *Scanner) scanValString() string { + offs := s.offset + + hasCR := false + end := offs + inQuote := false +loop: + for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' { + ch := s.ch + s.next() + switch { + case inQuote && ch == '\\': + s.scanEscape(true) + case !inQuote && ch == '\\': + if s.ch == '\r' { + hasCR = true + s.next() + } + if s.ch != '\n' { + s.error(offs, "unquoted '\\' must be followed by new line") + break loop + } + s.next() + case ch == '"': + inQuote = !inQuote + case ch == '\r': + hasCR = true + case ch < 0 || inQuote && ch == '\n': + s.error(offs, "string not terminated") + break loop + } + if inQuote || !isWhiteSpace(ch) { + end = s.offset + } + } + + lit := s.src[offs:end] + if hasCR { + lit = stripCR(lit) + } + + return string(lit) +} + +func isWhiteSpace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\r' +} + +func (s *Scanner) skipWhitespace() { + for isWhiteSpace(s.ch) { + s.next() + } +} + +// Scan scans the next token and returns the token position, the token, +// and its literal string if applicable. The source end is indicated by +// token.EOF. +// +// If the returned token is a literal (token.IDENT, token.STRING) or +// token.COMMENT, the literal string has the corresponding value. +// +// If the returned token is token.ILLEGAL, the literal string is the +// offending character. +// +// In all other cases, Scan returns an empty literal string. +// +// For more tolerant parsing, Scan will return a valid token if +// possible even if a syntax error was encountered. 
Thus, even +// if the resulting token sequence contains no illegal tokens, +// a client may not assume that no error occurred. Instead it +// must check the scanner's ErrorCount or the number of calls +// of the error handler, if there was one installed. +// +// Scan adds line information to the file added to the file +// set with Init. Token positions are relative to that file +// and thus relative to the file set. +// +func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) { +scanAgain: + s.skipWhitespace() + + // current token start + pos = s.file.Pos(s.offset) + + // determine token value + switch ch := s.ch; { + case s.nextVal: + lit = s.scanValString() + tok = token.STRING + s.nextVal = false + case isLetter(ch): + lit = s.scanIdentifier() + tok = token.IDENT + default: + s.next() // always make progress + switch ch { + case -1: + tok = token.EOF + case '\n': + tok = token.EOL + case '"': + tok = token.STRING + lit = s.scanString() + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case ';', '#': + // comment + lit = s.scanComment() + if s.mode&ScanComments == 0 { + // skip comment + goto scanAgain + } + tok = token.COMMENT + case '=': + tok = token.ASSIGN + s.nextVal = true + default: + s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch)) + tok = token.ILLEGAL + lit = string(ch) + } + } + + return +} diff --git a/vendor/gopkg.in/gcfg.v1/set.go b/vendor/gopkg.in/gcfg.v1/set.go new file mode 100644 index 00000000..7252b689 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/set.go @@ -0,0 +1,293 @@ +package gcfg + +import ( + "fmt" + "math/big" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "gopkg.in/gcfg.v1/types" +) + +type tag struct { + ident string + intMode string +} + +func newTag(ts string) tag { + t := tag{} + s := strings.Split(ts, ",") + t.ident = s[0] + for _, tse := range s[1:] { + if strings.HasPrefix(tse, "int=") { + t.intMode = tse[len("int="):] + } + } + return t +} + +func fieldFold(v reflect.Value, name string) (reflect.Value, tag) { + var n string + r0, _ := utf8.DecodeRuneInString(name) + if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) { + n = "X" + } + n += strings.Replace(name, "-", "_", -1) + f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool { + if !v.FieldByName(fieldName).CanSet() { + return false + } + f, _ := v.Type().FieldByName(fieldName) + t := newTag(f.Tag.Get("gcfg")) + if t.ident != "" { + return strings.EqualFold(t.ident, name) + } + return strings.EqualFold(n, fieldName) + }) + if !ok { + return reflect.Value{}, tag{} + } + return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg")) +} + +type setter func(destp interface{}, blank bool, val string, t tag) error + +var errUnsupportedType = fmt.Errorf("unsupported type") +var errBlankUnsupported = fmt.Errorf("blank value not supported for type") + +var setters = []setter{ + typeSetter, textUnmarshalerSetter, kindSetter, scanSetter, +} + +func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error { + dtu, ok := d.(textUnmarshaler) + if !ok { + return errUnsupportedType + } + if blank { + return errBlankUnsupported + } + return dtu.UnmarshalText([]byte(val)) +} + +func boolSetter(d interface{}, blank bool, val string, t tag) error { + if blank { + reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true)) + return nil + } + b, err := types.ParseBool(val) + if err == nil { + reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b)) + } + return err +} + +func intMode(mode string) types.IntMode { + var m types.IntMode 
+ if strings.ContainsAny(mode, "dD") { + m |= types.Dec + } + if strings.ContainsAny(mode, "hH") { + m |= types.Hex + } + if strings.ContainsAny(mode, "oO") { + m |= types.Oct + } + return m +} + +var typeModes = map[reflect.Type]types.IntMode{ + reflect.TypeOf(int(0)): types.Dec | types.Hex, + reflect.TypeOf(int8(0)): types.Dec | types.Hex, + reflect.TypeOf(int16(0)): types.Dec | types.Hex, + reflect.TypeOf(int32(0)): types.Dec | types.Hex, + reflect.TypeOf(int64(0)): types.Dec | types.Hex, + reflect.TypeOf(uint(0)): types.Dec | types.Hex, + reflect.TypeOf(uint8(0)): types.Dec | types.Hex, + reflect.TypeOf(uint16(0)): types.Dec | types.Hex, + reflect.TypeOf(uint32(0)): types.Dec | types.Hex, + reflect.TypeOf(uint64(0)): types.Dec | types.Hex, + // use default mode (allow dec/hex/oct) for uintptr type + reflect.TypeOf(big.Int{}): types.Dec | types.Hex, +} + +func intModeDefault(t reflect.Type) types.IntMode { + m, ok := typeModes[t] + if !ok { + m = types.Dec | types.Hex | types.Oct + } + return m +} + +func intSetter(d interface{}, blank bool, val string, t tag) error { + if blank { + return errBlankUnsupported + } + mode := intMode(t.intMode) + if mode == 0 { + mode = intModeDefault(reflect.TypeOf(d).Elem()) + } + return types.ParseInt(d, val, mode) +} + +func stringSetter(d interface{}, blank bool, val string, t tag) error { + if blank { + return errBlankUnsupported + } + dsp, ok := d.(*string) + if !ok { + return errUnsupportedType + } + *dsp = val + return nil +} + +var kindSetters = map[reflect.Kind]setter{ + reflect.String: stringSetter, + reflect.Bool: boolSetter, + reflect.Int: intSetter, + reflect.Int8: intSetter, + reflect.Int16: intSetter, + reflect.Int32: intSetter, + reflect.Int64: intSetter, + reflect.Uint: intSetter, + reflect.Uint8: intSetter, + reflect.Uint16: intSetter, + reflect.Uint32: intSetter, + reflect.Uint64: intSetter, + reflect.Uintptr: intSetter, +} + +var typeSetters = map[reflect.Type]setter{ + reflect.TypeOf(big.Int{}): intSetter, +} + +func typeSetter(d interface{}, blank bool, val string, tt tag) error { + t := reflect.ValueOf(d).Type().Elem() + setter, ok := typeSetters[t] + if !ok { + return errUnsupportedType + } + return setter(d, blank, val, tt) +} + +func kindSetter(d interface{}, blank bool, val string, tt tag) error { + k := reflect.ValueOf(d).Type().Elem().Kind() + setter, ok := kindSetters[k] + if !ok { + return errUnsupportedType + } + return setter(d, blank, val, tt) +} + +func scanSetter(d interface{}, blank bool, val string, tt tag) error { + if blank { + return errBlankUnsupported + } + return types.ScanFully(d, val, 'v') +} + +func set(cfg interface{}, sect, sub, name string, blank bool, value string) error { + vPCfg := reflect.ValueOf(cfg) + if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct { + panic(fmt.Errorf("config must be a pointer to a struct")) + } + vCfg := vPCfg.Elem() + vSect, _ := fieldFold(vCfg, sect) + if !vSect.IsValid() { + return fmt.Errorf("invalid section: section %q", sect) + } + if vSect.Kind() == reflect.Map { + vst := vSect.Type() + if vst.Key().Kind() != reflect.String || + vst.Elem().Kind() != reflect.Ptr || + vst.Elem().Elem().Kind() != reflect.Struct { + panic(fmt.Errorf("map field for section must have string keys and "+ + " pointer-to-struct values: section %q", sect)) + } + if vSect.IsNil() { + vSect.Set(reflect.MakeMap(vst)) + } + k := reflect.ValueOf(sub) + pv := vSect.MapIndex(k) + if !pv.IsValid() { + vType := vSect.Type().Elem().Elem() + pv = reflect.New(vType) + vSect.SetMapIndex(k, 
pv) + } + vSect = pv.Elem() + } else if vSect.Kind() != reflect.Struct { + panic(fmt.Errorf("field for section must be a map or a struct: "+ + "section %q", sect)) + } else if sub != "" { + return fmt.Errorf("invalid subsection: "+ + "section %q subsection %q", sect, sub) + } + // Empty name is a special value, meaning that only the + // section/subsection object is to be created, with no values set. + if name == "" { + return nil + } + vVar, t := fieldFold(vSect, name) + if !vVar.IsValid() { + return fmt.Errorf("invalid variable: "+ + "section %q subsection %q variable %q", sect, sub, name) + } + // vVal is either single-valued var, or newly allocated value within multi-valued var + var vVal reflect.Value + // multi-value if unnamed slice type + isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice || + vVar.Type().Name() == "" && vVar.Kind() == reflect.Ptr && vVar.Type().Elem().Name() == "" && vVar.Type().Elem().Kind() == reflect.Slice + if isMulti && vVar.Kind() == reflect.Ptr { + if vVar.IsNil() { + vVar.Set(reflect.New(vVar.Type().Elem())) + } + vVar = vVar.Elem() + } + if isMulti && blank { + vVar.Set(reflect.Zero(vVar.Type())) + return nil + } + if isMulti { + vVal = reflect.New(vVar.Type().Elem()).Elem() + } else { + vVal = vVar + } + isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr + isNew := isDeref && vVal.IsNil() + // vAddr is address of value to set (dereferenced & allocated as needed) + var vAddr reflect.Value + switch { + case isNew: + vAddr = reflect.New(vVal.Type().Elem()) + case isDeref && !isNew: + vAddr = vVal + default: + vAddr = vVal.Addr() + } + vAddrI := vAddr.Interface() + err, ok := error(nil), false + for _, s := range setters { + err = s(vAddrI, blank, value, t) + if err == nil { + ok = true + break + } + if err != errUnsupportedType { + return err + } + } + if !ok { + // in case all setters returned errUnsupportedType + return err + } + if isNew { // set reference if it was dereferenced and newly allocated + vVal.Set(vAddr) + } + if isMulti { // append if multi-valued + vVar.Set(reflect.Append(vVar, vVal)) + } + return nil +} diff --git a/vendor/gopkg.in/gcfg.v1/token/position.go b/vendor/gopkg.in/gcfg.v1/token/position.go new file mode 100644 index 00000000..fc45c1e7 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/token/position.go @@ -0,0 +1,435 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO(gri) consider making this a separate package outside the go directory. + +package token + +import ( + "fmt" + "sort" + "sync" +) + +// ----------------------------------------------------------------------------- +// Positions + +// Position describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +// +type Position struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. 
+func (pos *Position) IsValid() bool { return pos.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +// +func (pos Position) String() string { + s := pos.Filename + if pos.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", pos.Line, pos.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Pos is a compact encoding of a source position within a file set. +// It can be converted into a Position for a more convenient, but much +// larger, representation. +// +// The Pos value for a given file is a number in the range [base, base+size], +// where base and size are specified when adding the file to the file set via +// AddFile. +// +// To create the Pos value for a specific source offset, first add +// the respective file to the current file set (via FileSet.AddFile) +// and then call File.Pos(offset) for that file. Given a Pos value p +// for a specific file set fset, the corresponding Position value is +// obtained by calling fset.Position(p). +// +// Pos values can be compared directly with the usual comparison operators: +// If two Pos values p and q are in the same file, comparing p and q is +// equivalent to comparing the respective source file offsets. If p and q +// are in different files, p < q is true if the file implied by p was added +// to the respective file set before the file implied by q. +// +type Pos int + +// The zero value for Pos is NoPos; there is no file and line information +// associated with it, and NoPos().IsValid() is false. NoPos is always +// smaller than any other Pos value. The corresponding Position value +// for NoPos is the zero value for Position. +// +const NoPos Pos = 0 + +// IsValid returns true if the position is valid. +func (p Pos) IsValid() bool { + return p != NoPos +} + +// ----------------------------------------------------------------------------- +// File + +// A File is a handle for a file belonging to a FileSet. +// A File has a name, size, and line offset table. +// +type File struct { + set *FileSet + name string // file name as provided to AddFile + base int // Pos value range for this file is [base...base+size] + size int // file size as provided to AddFile + + // lines and infos are protected by set.mutex + lines []int + infos []lineInfo +} + +// Name returns the file name of file f as registered with AddFile. +func (f *File) Name() string { + return f.name +} + +// Base returns the base offset of file f as registered with AddFile. +func (f *File) Base() int { + return f.base +} + +// Size returns the size of file f as registered with AddFile. +func (f *File) Size() int { + return f.size +} + +// LineCount returns the number of lines in file f. +func (f *File) LineCount() int { + f.set.mutex.RLock() + n := len(f.lines) + f.set.mutex.RUnlock() + return n +} + +// AddLine adds the line offset for a new line. +// The line offset must be larger than the offset for the previous line +// and smaller than the file size; otherwise the line offset is ignored. +// +func (f *File) AddLine(offset int) { + f.set.mutex.Lock() + if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size { + f.lines = append(f.lines, offset) + } + f.set.mutex.Unlock() +} + +// SetLines sets the line offsets for a file and returns true if successful. 
+// The line offsets are the offsets of the first character of each line; +// for instance for the content "ab\nc\n" the line offsets are {0, 3}. +// An empty file has an empty line offset table. +// Each line offset must be larger than the offset for the previous line +// and smaller than the file size; otherwise SetLines fails and returns +// false. +// +func (f *File) SetLines(lines []int) bool { + // verify validity of lines table + size := f.size + for i, offset := range lines { + if i > 0 && offset <= lines[i-1] || size <= offset { + return false + } + } + + // set lines table + f.set.mutex.Lock() + f.lines = lines + f.set.mutex.Unlock() + return true +} + +// SetLinesForContent sets the line offsets for the given file content. +func (f *File) SetLinesForContent(content []byte) { + var lines []int + line := 0 + for offset, b := range content { + if line >= 0 { + lines = append(lines, line) + } + line = -1 + if b == '\n' { + line = offset + 1 + } + } + + // set lines table + f.set.mutex.Lock() + f.lines = lines + f.set.mutex.Unlock() +} + +// A lineInfo object describes alternative file and line number +// information (such as provided via a //line comment in a .go +// file) for a given file offset. +type lineInfo struct { + // fields are exported to make them accessible to gob + Offset int + Filename string + Line int +} + +// AddLineInfo adds alternative file and line number information for +// a given file offset. The offset must be larger than the offset for +// the previously added alternative line info and smaller than the +// file size; otherwise the information is ignored. +// +// AddLineInfo is typically used to register alternative position +// information for //line filename:line comments in source files. +// +func (f *File) AddLineInfo(offset int, filename string, line int) { + f.set.mutex.Lock() + if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size { + f.infos = append(f.infos, lineInfo{offset, filename, line}) + } + f.set.mutex.Unlock() +} + +// Pos returns the Pos value for the given file offset; +// the offset must be <= f.Size(). +// f.Pos(f.Offset(p)) == p. +// +func (f *File) Pos(offset int) Pos { + if offset > f.size { + panic("illegal file offset") + } + return Pos(f.base + offset) +} + +// Offset returns the offset for the given file position p; +// p must be a valid Pos value in that file. +// f.Offset(f.Pos(offset)) == offset. +// +func (f *File) Offset(p Pos) int { + if int(p) < f.base || int(p) > f.base+f.size { + panic("illegal Pos value") + } + return int(p) - f.base +} + +// Line returns the line number for the given file position p; +// p must be a Pos value in that file or NoPos. +// +func (f *File) Line(p Pos) int { + // TODO(gri) this can be implemented much more efficiently + return f.Position(p).Line +} + +func searchLineInfos(a []lineInfo, x int) int { + return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1 +} + +// info returns the file name, line, and column number for a file offset. 
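+// For instance (illustrative): with content "ab\ncd\nef" the line offset
+// table is {0, 3, 6}, so offset 4 lands on line 2, column 2 (the 'd').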
+func (f *File) info(offset int) (filename string, line, column int) { + filename = f.name + if i := searchInts(f.lines, offset); i >= 0 { + line, column = i+1, offset-f.lines[i]+1 + } + if len(f.infos) > 0 { + // almost no files have extra line infos + if i := searchLineInfos(f.infos, offset); i >= 0 { + alt := &f.infos[i] + filename = alt.Filename + if i := searchInts(f.lines, alt.Offset); i >= 0 { + line += alt.Line - i - 1 + } + } + } + return +} + +func (f *File) position(p Pos) (pos Position) { + offset := int(p) - f.base + pos.Offset = offset + pos.Filename, pos.Line, pos.Column = f.info(offset) + return +} + +// Position returns the Position value for the given file position p; +// p must be a Pos value in that file or NoPos. +// +func (f *File) Position(p Pos) (pos Position) { + if p != NoPos { + if int(p) < f.base || int(p) > f.base+f.size { + panic("illegal Pos value") + } + pos = f.position(p) + } + return +} + +// ----------------------------------------------------------------------------- +// FileSet + +// A FileSet represents a set of source files. +// Methods of file sets are synchronized; multiple goroutines +// may invoke them concurrently. +// +type FileSet struct { + mutex sync.RWMutex // protects the file set + base int // base offset for the next file + files []*File // list of files in the order added to the set + last *File // cache of last file looked up +} + +// NewFileSet creates a new file set. +func NewFileSet() *FileSet { + s := new(FileSet) + s.base = 1 // 0 == NoPos + return s +} + +// Base returns the minimum base offset that must be provided to +// AddFile when adding the next file. +// +func (s *FileSet) Base() int { + s.mutex.RLock() + b := s.base + s.mutex.RUnlock() + return b + +} + +// AddFile adds a new file with a given filename, base offset, and file size +// to the file set s and returns the file. Multiple files may have the same +// name. The base offset must not be smaller than the FileSet's Base(), and +// size must not be negative. +// +// Adding the file will set the file set's Base() value to base + size + 1 +// as the minimum base value for the next file. The following relationship +// exists between a Pos value p for a given file offset offs: +// +// int(p) = base + offs +// +// with offs in the range [0, size] and thus p in the range [base, base+size]. +// For convenience, File.Pos may be used to create file-specific position +// values from a file offset. +// +func (s *FileSet) AddFile(filename string, base, size int) *File { + s.mutex.Lock() + defer s.mutex.Unlock() + if base < s.base || size < 0 { + panic("illegal base or size") + } + // base >= s.base && size >= 0 + f := &File{s, filename, base, size, []int{0}, nil} + base += size + 1 // +1 because EOF also has a position + if base < 0 { + panic("token.Pos offset overflow (> 2G of source code in file set)") + } + // add the file to the file set + s.base = base + s.files = append(s.files, f) + s.last = f + return f +} + +// Iterate calls f for the files in the file set in the order they were added +// until f returns false. 
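+//
+// For example (illustrative):
+//
+//	fset.Iterate(func(f *File) bool {
+//		fmt.Println(f.Name(), f.Size())
+//		return true // visit every file
+//	})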
+// +func (s *FileSet) Iterate(f func(*File) bool) { + for i := 0; ; i++ { + var file *File + s.mutex.RLock() + if i < len(s.files) { + file = s.files[i] + } + s.mutex.RUnlock() + if file == nil || !f(file) { + break + } + } +} + +func searchFiles(a []*File, x int) int { + return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1 +} + +func (s *FileSet) file(p Pos) *File { + // common case: p is in last file + if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size { + return f + } + // p is not in last file - search all files + if i := searchFiles(s.files, int(p)); i >= 0 { + f := s.files[i] + // f.base <= int(p) by definition of searchFiles + if int(p) <= f.base+f.size { + s.last = f + return f + } + } + return nil +} + +// File returns the file that contains the position p. +// If no such file is found (for instance for p == NoPos), +// the result is nil. +// +func (s *FileSet) File(p Pos) (f *File) { + if p != NoPos { + s.mutex.RLock() + f = s.file(p) + s.mutex.RUnlock() + } + return +} + +// Position converts a Pos in the fileset into a general Position. +func (s *FileSet) Position(p Pos) (pos Position) { + if p != NoPos { + s.mutex.RLock() + if f := s.file(p); f != nil { + pos = f.position(p) + } + s.mutex.RUnlock() + } + return +} + +// ----------------------------------------------------------------------------- +// Helper functions + +func searchInts(a []int, x int) int { + // This function body is a manually inlined version of: + // + // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1 + // + // With better compiler optimizations, this may not be needed in the + // future, but at the moment this change improves the go/printer + // benchmark performance by ~30%. This has a direct impact on the + // speed of gofmt and thus seems worthwhile (2011-04-29). + // TODO(gri): Remove this when compilers have caught up. + i, j := 0, len(a) + for i < j { + h := i + (j-i)/2 // avoid overflow when computing h + // i ≤ h < j + if a[h] <= x { + i = h + 1 + } else { + j = h + } + } + return i - 1 +} diff --git a/vendor/gopkg.in/gcfg.v1/token/serialize.go b/vendor/gopkg.in/gcfg.v1/token/serialize.go new file mode 100644 index 00000000..4adc8f9e --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/token/serialize.go @@ -0,0 +1,56 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package token + +type serializedFile struct { + // fields correspond 1:1 to fields with same (lower-case) name in File + Name string + Base int + Size int + Lines []int + Infos []lineInfo +} + +type serializedFileSet struct { + Base int + Files []serializedFile +} + +// Read calls decode to deserialize a file set into s; s must not be nil. +func (s *FileSet) Read(decode func(interface{}) error) error { + var ss serializedFileSet + if err := decode(&ss); err != nil { + return err + } + + s.mutex.Lock() + s.base = ss.Base + files := make([]*File, len(ss.Files)) + for i := 0; i < len(ss.Files); i++ { + f := &ss.Files[i] + files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos} + } + s.files = files + s.last = nil + s.mutex.Unlock() + + return nil +} + +// Write calls encode to serialize the file set s. 
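+//
+// Editor's sketch (hypothetical usage, not part of the vendored source):
+// Read and Write pair naturally with encoding/gob for a round trip:
+//
+//	var buf bytes.Buffer
+//	if err := fset.Write(gob.NewEncoder(&buf).Encode); err != nil {
+//		// handle err
+//	}
+//	restored := NewFileSet()
+//	if err := restored.Read(gob.NewDecoder(&buf).Decode); err != nil {
+//		// handle err
+//	}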
+func (s *FileSet) Write(encode func(interface{}) error) error { + var ss serializedFileSet + + s.mutex.Lock() + ss.Base = s.base + files := make([]serializedFile, len(s.files)) + for i, f := range s.files { + files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos} + } + ss.Files = files + s.mutex.Unlock() + + return encode(ss) +} diff --git a/vendor/gopkg.in/gcfg.v1/token/token.go b/vendor/gopkg.in/gcfg.v1/token/token.go new file mode 100644 index 00000000..b3c7c83f --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/token/token.go @@ -0,0 +1,83 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package token defines constants representing the lexical tokens of the gcfg +// configuration syntax and basic operations on tokens (printing, predicates). +// +// Note that the API for the token package may change to accommodate new +// features or implementation changes in gcfg. +// +package token + +import "strconv" + +// Token is the set of lexical tokens of the gcfg configuration syntax. +type Token int + +// The list of tokens. +const ( + // Special tokens + ILLEGAL Token = iota + EOF + COMMENT + + literal_beg + // Identifiers and basic type literals + // (these tokens stand for classes of literals) + IDENT // section-name, variable-name + STRING // "subsection-name", variable value + literal_end + + operator_beg + // Operators and delimiters + ASSIGN // = + LBRACK // [ + RBRACK // ] + EOL // \n + operator_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + COMMENT: "COMMENT", + + IDENT: "IDENT", + STRING: "STRING", + + ASSIGN: "=", + LBRACK: "[", + RBRACK: "]", + EOL: "\n", +} + +// String returns the string corresponding to the token tok. +// For operators and delimiters, the string is the actual token character +// sequence (e.g., for the token ASSIGN, the string is "="). For all other +// tokens the string corresponds to the token constant name (e.g. for the +// token IDENT, the string is "IDENT"). +// +func (tok Token) String() string { + s := "" + if 0 <= tok && tok < Token(len(tokens)) { + s = tokens[tok] + } + if s == "" { + s = "token(" + strconv.Itoa(int(tok)) + ")" + } + return s +} + +// Predicates + +// IsLiteral returns true for tokens corresponding to identifiers +// and basic type literals; it returns false otherwise. +// +func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +// +func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end } diff --git a/vendor/gopkg.in/gcfg.v1/types/bool.go b/vendor/gopkg.in/gcfg.v1/types/bool.go new file mode 100644 index 00000000..8dcae0d8 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/types/bool.go @@ -0,0 +1,23 @@ +package types + +// BoolValues defines the name and value mappings for ParseBool. +var BoolValues = map[string]interface{}{ + "true": true, "yes": true, "on": true, "1": true, + "false": false, "no": false, "off": false, "0": false, +} + +var boolParser = func() *EnumParser { + ep := &EnumParser{} + ep.AddVals(BoolValues) + return ep +}() + +// ParseBool parses bool values according to the definitions in BoolValues. +// Parsing is case-insensitive. 
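+//
+// Editor's sketch (hypothetical usage, not part of the vendored source):
+//
+//	v, err := ParseBool("Yes")  // v == true, err == nil
+//	_, err = ParseBool("maybe") // err != nil: "maybe" is not a BoolValues key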
+func ParseBool(s string) (bool, error) {
+	v, err := boolParser.Parse(s)
+	if err != nil {
+		return false, err
+	}
+	return v.(bool), nil
+}
diff --git a/vendor/gopkg.in/gcfg.v1/types/doc.go b/vendor/gopkg.in/gcfg.v1/types/doc.go
new file mode 100644
index 00000000..9f9c345f
--- /dev/null
+++ b/vendor/gopkg.in/gcfg.v1/types/doc.go
@@ -0,0 +1,4 @@
+// Package types defines helpers for type conversions.
+//
+// The API for this package is not finalized yet.
+package types
diff --git a/vendor/gopkg.in/gcfg.v1/types/enum.go b/vendor/gopkg.in/gcfg.v1/types/enum.go
new file mode 100644
index 00000000..1a0c7ef4
--- /dev/null
+++ b/vendor/gopkg.in/gcfg.v1/types/enum.go
@@ -0,0 +1,44 @@
+package types
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// EnumParser parses "enum" values, i.e. it maps a predefined set of strings
+// to predefined values.
+type EnumParser struct {
+	Type      string // type name; if not set, use type of first value added
+	CaseMatch bool   // if true, matching of strings is case-sensitive
+	// PrefixMatch bool
+	vals map[string]interface{}
+}
+
+// AddVals adds strings and values to an EnumParser.
+func (ep *EnumParser) AddVals(vals map[string]interface{}) {
+	if ep.vals == nil {
+		ep.vals = make(map[string]interface{})
+	}
+	for k, v := range vals {
+		if ep.Type == "" {
+			ep.Type = reflect.TypeOf(v).Name()
+		}
+		if !ep.CaseMatch {
+			k = strings.ToLower(k)
+		}
+		ep.vals[k] = v
+	}
+}
+
+// Parse parses the string and returns the value or an error.
+func (ep EnumParser) Parse(s string) (interface{}, error) {
+	if !ep.CaseMatch {
+		s = strings.ToLower(s)
+	}
+	v, ok := ep.vals[s]
+	if !ok {
+		return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s)
+	}
+	return v, nil
+}
diff --git a/vendor/gopkg.in/gcfg.v1/types/int.go b/vendor/gopkg.in/gcfg.v1/types/int.go
new file mode 100644
index 00000000..af7e75c1
--- /dev/null
+++ b/vendor/gopkg.in/gcfg.v1/types/int.go
@@ -0,0 +1,86 @@
+package types
+
+import (
+	"fmt"
+	"strings"
+)
+
+// An IntMode is a mode for parsing integer values, representing a set of
+// accepted bases.
+type IntMode uint8
+
+// IntMode values for ParseInt; may be combined using bitwise OR.
+const (
+	Dec IntMode = 1 << iota
+	Hex
+	Oct
+)
+
+// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`.
+func (m IntMode) String() string {
+	var modes []string
+	if m&Dec != 0 {
+		modes = append(modes, "Dec")
+	}
+	if m&Hex != 0 {
+		modes = append(modes, "Hex")
+	}
+	if m&Oct != 0 {
+		modes = append(modes, "Oct")
+	}
+	return "IntMode(" + strings.Join(modes, "|") + ")"
+}
+
+var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix")
+
+func prefix0(val string) bool {
+	return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0")
+}
+
+func prefix0x(val string) bool {
+	return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x")
+}
+
+// ParseInt parses val using mode into intptr, which must be a pointer to an
+// integer kind type. Non-decimal values require a `0` or `0x` prefix in the
+// cases where mode permits ambiguity of base; otherwise the prefix can be
+// omitted.
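+//
+// Editor's sketch (hypothetical usage, not part of the vendored source):
+//
+//	var n int
+//	err := ParseInt(&n, "0x1f", Dec|Hex) // n == 31; the "0x" prefix selects hex
+//	err = ParseInt(&n, "17", Dec|Hex)    // n == 17; no prefix, so decimal
+//	err = ParseInt(&n, "17", Hex|Oct)    // returns errIntAmbig: base is ambiguous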
+func ParseInt(intptr interface{}, val string, mode IntMode) error { + val = strings.TrimSpace(val) + verb := byte(0) + switch mode { + case Dec: + verb = 'd' + case Dec + Hex: + if prefix0x(val) { + verb = 'v' + } else { + verb = 'd' + } + case Dec + Oct: + if prefix0(val) && !prefix0x(val) { + verb = 'v' + } else { + verb = 'd' + } + case Dec + Hex + Oct: + verb = 'v' + case Hex: + if prefix0x(val) { + verb = 'v' + } else { + verb = 'x' + } + case Oct: + verb = 'o' + case Hex + Oct: + if prefix0(val) { + verb = 'v' + } else { + return errIntAmbig + } + } + if verb == 0 { + panic("unsupported mode") + } + return ScanFully(intptr, val, verb) +} diff --git a/vendor/gopkg.in/gcfg.v1/types/scan.go b/vendor/gopkg.in/gcfg.v1/types/scan.go new file mode 100644 index 00000000..db2f6ed3 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/types/scan.go @@ -0,0 +1,23 @@ +package types + +import ( + "fmt" + "io" + "reflect" +) + +// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr. +func ScanFully(ptr interface{}, val string, verb byte) error { + t := reflect.ValueOf(ptr).Elem().Type() + // attempt to read extra bytes to make sure the value is consumed + var b []byte + n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b) + switch { + case n < 1 || n == 1 && err != io.EOF: + return fmt.Errorf("failed to parse %q as %v: %v", val, t, err) + case n > 1: + return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b)) + } + // n == 1 && err == io.EOF + return nil +} diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 00000000..a68e67f0 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,188 @@ + +Copyright (c) 2011-2014 - Canonical Inc. + +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. 
+ + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. 
+ + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. 
diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 00000000..95ec014e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,742 @@ +package yaml + +import ( + "io" + "os" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// File read handler. +func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_file.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_file_read_handler + parser.input_file = file +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + return true +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// File write handler. +func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_file.Write(buffer) + return err +} + +// Set a string output. 
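+//
+// Editor's sketch (hypothetical usage, not part of the vendored source):
+// wiring an emitter to an in-memory buffer via the string write handler:
+//
+//	var emitter yaml_emitter_t
+//	var out []byte
+//	yaml_emitter_initialize(&emitter)
+//	yaml_emitter_set_output_string(&emitter, &out)
+//	// events emitted via yaml_emitter_emit are appended to out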
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_file_write_handler + emitter.output_file = file +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? 
octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } + return true +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } + return true +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } + return true +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } + return true +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } + return true +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. +// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// +// assert(document) // Non-NULL document object is expected. 
+// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. 
+// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 00000000..085cddc4 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,683 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. + +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + + if len(b) == 0 { + b = []byte{'\n'} + } + + yaml_parser_set_input_string(&p.parser, b) + + p.skip() + if p.event.typ != yaml_STREAM_START_EVENT { + panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return &p +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +func (p *parser) skip() { + if p.event.typ != yaml_NO_EVENT { + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + yaml_event_delete(&p.event) + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + switch p.event.typ { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + default: + panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) + } + panic("unreachable") +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.skip() + n.children = append(n.children, p.parse()) + if p.event.typ != yaml_DOCUMENT_END_EVENT { + panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + p.skip() + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.skip() + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.skip() + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.skip() + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[string]bool + mapType reflect.Type + terrors []string +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() +) + +func newDecoder() *decoder { + d := &decoder{mapType: defaultMapType} + d.aliases = make(map[string]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
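+//
+// Editor's sketch (hypothetical example, not part of the vendored source):
+// a type whose UnmarshalYAML is discovered by prepare during decoding
+// (strconv and strings are assumed to be imported):
+//
+//	type Celsius float64
+//
+//	func (c *Celsius) UnmarshalYAML(unmarshal func(interface{}) error) error {
+//		var s string
+//		if err := unmarshal(&s); err != nil {
+//			return err
+//		}
+//		v, err := strconv.ParseFloat(strings.TrimSuffix(s, "C"), 64)
+//		if err != nil {
+//			return err
+//		}
+//		*c = Celsius(v)
+//		return nil
+//	}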
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + an, ok := d.doc.anchors[n.value] + if !ok { + failf("unknown anchor '%s' referenced", n.value) + } + if d.aliases[n.value] { + failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n.value] = true + good = d.unmarshal(an, out) + delete(d.aliases, n.value) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if s, ok := resolved.(string); ok && out.CanAddr() { + if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { + err := u.UnmarshalText([]byte(s)) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + good = true + } else if resolved != nil { + out.SetString(n.value) + good = true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else { + out.Set(reflect.ValueOf(resolved)) + } + good = true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + good = true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + 
good = true
+			}
+		case string:
+			if out.Type() == durationType {
+				d, err := time.ParseDuration(resolved)
+				if err == nil {
+					out.SetInt(int64(d))
+					good = true
+				}
+			}
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch resolved := resolved.(type) {
+		case int:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case int64:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case uint64:
+			if !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case float64:
+			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		}
+	case reflect.Bool:
+		switch resolved := resolved.(type) {
+		case bool:
+			out.SetBool(resolved)
+			good = true
+		}
+	case reflect.Float32, reflect.Float64:
+		switch resolved := resolved.(type) {
+		case int:
+			out.SetFloat(float64(resolved))
+			good = true
+		case int64:
+			out.SetFloat(float64(resolved))
+			good = true
+		case uint64:
+			out.SetFloat(float64(resolved))
+			good = true
+		case float64:
+			out.SetFloat(resolved)
+			good = true
+		}
+	case reflect.Ptr:
+		if out.Type().Elem() == reflect.TypeOf(resolved) {
+			// TODO: Does this make sense? When is out a Ptr except when decoding a nil value?
+			elem := reflect.New(out.Type().Elem())
+			elem.Elem().Set(reflect.ValueOf(resolved))
+			out.Set(elem)
+			good = true
+		}
+	}
+	if !good {
+		d.terror(n, tag, out)
+	}
+	return good
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+	v := reflect.ValueOf(i)
+	sv := reflect.New(v.Type()).Elem()
+	sv.Set(v)
+	return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+	l := len(n.children)
+
+	var iface reflect.Value
+	switch out.Kind() {
+	case reflect.Slice:
+		out.Set(reflect.MakeSlice(out.Type(), l, l))
+	case reflect.Interface:
+		// No type hints. Will have to use a generic sequence.
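+		// (Editor's note: the children are decoded into a temporary
+		// []interface{} of the right length, and iface.Set(out) below
+		// assigns the finished slice back to the interface value.)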
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + out.Set(out.Slice(0, j)) + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + inlineMap.SetMapIndex(name, value) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as 
the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 00000000..2befd553 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. 
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
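+//
+// Each yaml_EMIT_* state maps to exactly one handler below. As a rough
+// illustration, emitting the single-document stream
+//
+//	a: 1
+//
+// passes through STREAM-START, FIRST-DOCUMENT-START, DOCUMENT-CONTENT,
+// the block-mapping key and value states, DOCUMENT-END and finally END.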
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
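+//
+// For an explicit document that carries directives, the handler below
+// emits a prologue along these lines (illustrative output only):
+//
+//	%YAML 1.1
+//	%TAG !e! tag:example.com,2016:
+//	---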
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. 
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
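+//
+// Flow style is also what the encoder picks for struct fields tagged with
+// the ",flow" option. A small sketch against the package's public API
+// (type and field names are illustrative):
+//
+//	type T struct {
+//		M map[string]int `yaml:"m,flow"`
+//	}
+//	out, _ := yaml.Marshal(T{M: map[string]int{"a": 1}})
+//	// out is roughly: m: {a: 1}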
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
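+//
+// The root/sequence/mapping/simple_key flags record the context the node
+// sits in; later decisions consult them. For example, a multiline scalar
+// in simple_key_context is forced into double quotes by
+// yaml_emitter_select_scalar_style further below.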
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + } + return false +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
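+//
+// True only when a MAPPING-START event is immediately followed by
+// MAPPING-END in the queue, i.e. the mapping would render as {}. The extra
+// events that yaml_emitter_need_more_events buffers for MAPPING-START are
+// what make this peek possible.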
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+	length := 0
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_ALIAS_EVENT:
+		length += len(emitter.anchor_data.anchor)
+	case yaml_SCALAR_EVENT:
+		if emitter.scalar_data.multiline {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix) +
+			len(emitter.scalar_data.value)
+	case yaml_SEQUENCE_START_EVENT:
+		if !yaml_emitter_check_empty_sequence(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	case yaml_MAPPING_START_EVENT:
+		if !yaml_emitter_check_empty_mapping(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	default:
+		return false
+	}
+	return length <= 128
+}
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+	if no_tag && !event.implicit && !event.quoted_implicit {
+		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+	}
+
+	style := event.scalar_style()
+	if style == yaml_ANY_SCALAR_STYLE {
+		style = yaml_PLAIN_SCALAR_STYLE
+	}
+	if emitter.canonical {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	if emitter.simple_key_context && emitter.scalar_data.multiline {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+
+	if style == yaml_PLAIN_SCALAR_STYLE {
+		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if no_tag && !event.implicit {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+		if !emitter.scalar_data.single_quoted_allowed {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+
+	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+		emitter.tag_data.handle = []byte{'!'}
+	}
+	emitter.scalar_data.style = style
+	return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+	if emitter.anchor_data.anchor == nil {
+		return true
+	}
+	c := []byte{'&'}
+	if emitter.anchor_data.alias {
+		c[0] = '*'
+	}
+	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+		return false
+	}
+	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
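+//
+// A tag that matched a %TAG directive prefix is written as handle plus
+// suffix (for example !e!suffix), while a tag with no handle falls back to
+// the verbatim form, e.g. !<tag:yaml.org,2002:str>. Examples illustrative.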
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. 
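+//
+// Anchors and aliases share the same lexical rules: non-empty and strictly
+// alphanumeric. In a document they pair up as, for example:
+//
+//	base: &b {x: 1}
+//	copy: *b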
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceeded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceeded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceeded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceeded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
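+//
+// Resets and repopulates emitter.anchor_data, tag_data and scalar_data for
+// the event about to be emitted; the scalar analysis above is what later
+// restricts which of the plain, quoted and block styles are permitted.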
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } 
else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, 
rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 00000000..84f84995 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,306 @@ +package yaml + +import ( + "encoding" + "fmt" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool +} + +func newEncoder() (e *encoder) { + e = &encoder{} + e.must(yaml_emitter_initialize(&e.emitter)) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) + e.emit() + e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) + e.emit() + return e +} + +func (e *encoder) finish() { + e.must(yaml_document_end_event_initialize(&e.event, true)) + e.emit() + e.emitter.open_ended = false + e.must(yaml_stream_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { + e.must(false) + } +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() { + e.nilv() + return + } + iface := in.Interface() + if m, ok := iface.(Marshaler); ok { + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + } else if m, ok := iface.(encoding.TextMarshaler); ok { + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + } + switch in.Kind() { + case reflect.Interface: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + f() + e.must(yaml_mapping_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) slicev(tag 
string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + rtag, rs := resolve("", s) + if rtag == yaml_BINARY_TAG { + if tag == "" || tag == yaml_STR_TAG { + tag = rtag + s = rs.(string) + } else if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } else { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + } + if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else if strings.Contains(s, "\n") { + style = yaml_LITERAL_SCALAR_STYLE + } else { + style = yaml_PLAIN_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // FIXME: Handle 64 bits here. + s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 00000000..0a7037ad --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1096 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
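+//
+// The parser mirrors the emitter's state machine: each yaml_PARSE_* state
+// consumes tokens from the scanner and, broadly, yields one event per
+// yaml_parser_parse call. A minimal round trip through the package's
+// exported API:
+//
+//	var v struct{ A int }
+//	err := yaml.Unmarshal([]byte("a: 1"), &v)
+//	// the states below produced the event stream that decode.go consumed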
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } + return false +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+//              ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
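+ // Editorial note, not in the original source: with the default tag
+ // directives defined near yaml_parser_process_directives below, a
+ // handle/suffix pair such as ("!!", "str") resolves against the
+ // prefix "tag:yaml.org,2002:" to the full tag "tag:yaml.org,2002:str".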
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
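+// An editorial aside, not in the original source: empty scalars are what
+// the parser emits for omitted nodes, e.g. the missing value of a bare
+// "key:" entry in a block mapping, so such entries still yield a
+// well-formed event stream.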
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go
new file mode 100644
index 00000000..f4507917
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/readerc.go
@@ -0,0 +1,394 @@
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
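+// An editorial aside, not in the original source: the three markers below
+// are the UTF-8 BOM (EF BB BF), the UTF-16LE BOM (FF FE), and the UTF-16BE
+// BOM (FE FF); yaml_parser_determine_encoding below compares them against
+// the first bytes of the stream.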
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we have enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
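+ // Editorial note, not in the original source: from this point on,
+ // parser.buffer always holds UTF-8 regardless of the input encoding,
+ // and parser.unread counts decoded characters rather than bytes,
+ // which is why it is incremented once per rune in the loop below.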
+ parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). 
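+ // A concrete instance of the scheme described below (an editorial
+ // example, not part of the original comments): U+1F600 gives
+ // U' = 0xF600, which splits into the surrogates W1 = 0xD83D and
+ // W2 = 0xDE00.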
+ // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 
11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 00000000..93a86327 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,203 @@ +package yaml + +import ( + "encoding/base64" + "math" + "strconv" + "strings" + "unicode/utf8" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: + return true + } + return false +} + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. 
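+ // For example (an editorial note, not in the original source): the
+ // plain scalars "yes", "~" and ".inf" resolve via this map to
+ // (yaml_BOOL_TAG, true), (yaml_NULL_TAG, nil) and
+ // (yaml_FLOAT_TAG, +Inf) respectively.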
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+ // the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return yaml_FLOAT_TAG, floatv
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, int(intv)
+ } else {
+ return yaml_INT_TAG, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return yaml_INT_TAG, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt(plain[3:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return yaml_INT_TAG, -int(intv)
+ } else {
+ return yaml_INT_TAG, -intv
+ }
+ }
+ }
+ // XXX Handle timestamps here.
+
+ default:
+ panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ if tag == yaml_BINARY_TAG {
+ return yaml_BINARY_TAG, in
+ }
+ if utf8.ValidString(in) {
+ return yaml_STR_TAG, in
+ }
+ return yaml_BINARY_TAG, encodeBase64(in)
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 00000000..25808000
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2710 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive descent parser (or,
+// LL(1) parser, as it is usually called).
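+//
+// As a rough editorial illustration (not part of the original notes), the
+// Scanner is driven through yaml_parser_scan, defined later in this file,
+// along these lines:
+//
+//     var token yaml_token_t
+//     for {
+//         if !yaml_parser_scan(&parser, &token) {
+//             break // scanner error; details are in parser.problem
+//         }
+//         if token.typ == yaml_STREAM_END_TOKEN {
+//             break
+//         }
+//     }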
+//
+// Actually there are two issues of Scanning that might be called "clever", the
+// rest is quite straightforward. The issues are "block collection start" and
+// "simple keys". Both issues are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2.
A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1.
Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require starting a new block collection on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start on the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. 
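+// An editorial aside, not in the original source: unlike the reader errors
+// set in readerc.go, scanner errors record both a context mark (where the
+// enclosing construct started) and a problem mark (where scanning failed).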
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ // Check if we really need to fetch more tokens.
+ need_more_tokens := false
+
+ if parser.tokens_head == len(parser.tokens) {
+ // Queue is empty.
+ need_more_tokens = true
+ } else {
+ // Check if any potential simple key may occupy the head position.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+ if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+ need_more_tokens = true
+ break
+ }
+ }
+ }
+
+ // We are finished.
+ if !need_more_tokens {
+ break
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ // Eat whitespace and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // Remove obsolete potential simple keys.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.'
&& is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' 
|| parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // A simple key is required only when it is the first token in the current + // line. Therefore it is always allowed. But we add a check anyway. + if required && !parser.simple_key_allowed { + panic("should not happen") + } + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. 
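+		// Illustration (an assumed reading, not vendored commentary): in
+		// the block mapping
+		//
+		//	a: 1
+		//	b: 2
+		//
+		// each key sits at the indentation column, making it a required
+		// simple key, so a missing ':' must be reported here rather than
+		// deferred to the parser.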
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+	}
+	// Remove the key from the stack.
+	parser.simple_keys[i].possible = false
+	return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// Increase the flow level.
+	parser.flow_level++
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+	}
+	return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+
+		// Create a token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        typ,
+			start_mark: mark,
+			end_mark:   mark,
+		}
+		if number > -1 {
+			number -= parser.tokens_parsed
+		}
+		yaml_insert_token(parser, number, &token)
+	}
+	return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level,
+// append the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	// Loop through the indentation levels in the stack.
+	for parser.indent > column {
+		// Create a token and append it to the queue.
+		token := yaml_token_t{
+			typ:        yaml_BLOCK_END_TOKEN,
+			start_mark: parser.mark,
+			end_mark:   parser.mark,
+		}
+		yaml_insert_token(parser, -1, &token)
+
+		// Pop the indentation level.
+		parser.indent = parser.indents[len(parser.indents)-1]
+		parser.indents = parser.indents[:len(parser.indents)-1]
+	}
+	return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+	// Set the initial indentation.
+	parser.indent = -1
+
+	// Initialize the simple key stack.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// A simple key is allowed at the beginning of the stream.
+	parser.simple_key_allowed = true
+
+	// We have started.
+	parser.stream_start_produced = true
+
+	// Create the STREAM-START token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_START_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+		encoding:   parser.encoding,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+	// Force new line.
+	if parser.mark.column != 0 {
+		parser.mark.column = 0
+		parser.mark.line++
+	}
+
+	// Reset the indentation level.
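+	// Unrolling to column -1 closes every block collection still open at
+	// EOF; e.g. the document "a:\n  b: 1" has mappings open at columns 0
+	// and 2, so two BLOCK-END tokens are expected here (an illustrative
+	// reading of the code, not captured output).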
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the STREAM-END token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token.
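+//
+// As a rough sketch, the flow sequence "[a, b]" scans to:
+//
+//	FLOW-SEQUENCE-START
+//	SCALAR("a",plain)
+//	FLOW-ENTRY            (produced here for the ',')
+//	SCALAR("b",plain)
+//	FLOW-SEQUENCE-END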
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it, because the Parser is
+		// able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if simple_key.possible {
+		// Create the KEY token and insert it into the queue.
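+		// The KEY token is inserted retroactively: for "key: value" the
+		// scanner only learns at the ':' that "key" was a key, so the token
+		// is placed at the queue position recorded when the simple key was
+		// saved, i.e. simple_key.token_number - parser.tokens_parsed (a
+		// sketch of the intent, matching the insertion below).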
+ token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. 
+	var token yaml_token_t
+	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+	// A plain scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a plain scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_plain_scalar(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+	// Loop until the next token is found.
+	for {
+		// Allow the BOM mark to start a line.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+		}
+
+		// Eat whitespaces.
+		// Tabs are allowed:
+		//  - in the flow context
+		//  - in the block context, but not at the beginning of the line or
+		//    after '-', '?', or ':' (complex value).
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Eat a comment until a line break.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			for !is_breakz(parser.buffer, parser.buffer_pos) {
+				skip(parser)
+				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+					return false
+				}
+			}
+		}
+
+		// If it is a line break, eat it.
+		if is_break(parser.buffer, parser.buffer_pos) {
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+			skip_line(parser)
+
+			// In the block context, a new line may start a simple key.
+			if parser.flow_level == 0 {
+				parser.simple_key_allowed = true
+			}
+		} else {
+			break // We have found a token.
+		}
+	}
+
+	return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+//      %YAML    1.1    # a comment \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Eat '%'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the directive name.
+	var name []byte
+	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+		return false
+	}
+
+	// Is it a YAML directive?
+	if bytes.Equal(name, []byte("YAML")) {
+		// Scan the VERSION directive value.
+		var major, minor int8
+		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a VERSION-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			major:      major,
+			minor:      minor,
+		}
+
+		// Is it a TAG directive?
+	} else if bytes.Equal(name, []byte("TAG")) {
+		// Scan the TAG directive value.
+		var handle, prefix []byte
+		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a TAG-DIRECTIVE token.
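+		// E.g. "%TAG !e! tag:example.com,2000:" is expected to yield the
+		// handle "!e!" and the prefix "tag:example.com,2000:" (illustrative
+		// values, not captured from a test run).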
+		*token = yaml_token_t{
+			typ:        yaml_TAG_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			value:      handle,
+			prefix:     prefix,
+		}
+
+		// Unknown directive.
+	} else {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unknown directive name")
+		return false
+	}
+
+	// Eat the rest of the line including any comments.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	if parser.buffer[parser.buffer_pos] == '#' {
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//       ^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002: \n
+//       ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Consume the major version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+		return false
+	}
+
+	// Eat '.'.
+	if parser.buffer[parser.buffer_pos] != '.' {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected digit or '.' character")
+	}
+
+	skip(parser)
+
+	// Consume the minor version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+		return false
+	}
+	return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
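+// Since max_number_length is 2, "%YAML 1.1" scans cleanly while a
+// hypothetical "%YAML 1.123" fails with "found extremely long version
+// number" on the minor part.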
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' 
||
+			parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+			parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+			parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+			parser.buffer[parser.buffer_pos] == '`') {
+		context := "while scanning an alias"
+		if typ == yaml_ANCHOR_TOKEN {
+			context = "while scanning an anchor"
+		}
+		yaml_parser_set_scanner_error(parser, context, start_mark,
+			"did not find expected alphabetic or numeric character")
+		return false
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+	}
+
+	return true
+}
+
+/*
+ * Scan a TAG token.
+ */
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+	var handle, suffix []byte
+
+	start_mark := parser.mark
+
+	// Check if the tag is in the canonical form.
+	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+		return false
+	}
+
+	if parser.buffer[parser.buffer_pos+1] == '<' {
+		// Keep the handle as ''
+
+		// Eat '!<'
+		skip(parser)
+		skip(parser)
+
+		// Consume the tag value.
+		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+			return false
+		}
+
+		// Check for '>' and eat it.
+		if parser.buffer[parser.buffer_pos] != '>' {
+			yaml_parser_set_scanner_error(parser, "while scanning a tag",
+				start_mark, "did not find the expected '>'")
+			return false
+		}
+
+		skip(parser)
+	} else {
+		// The tag has either the '!suffix' or the '!handle!suffix' form.
+
+		// First, try to scan a handle.
+		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+			return false
+		}
+
+		// Check if it is, indeed, a handle.
+		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+			// Scan the suffix now.
+			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+				return false
+			}
+		} else {
+			// It wasn't a handle after all. Scan the rest of the tag.
+			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+				return false
+			}
+
+			// Set the handle to '!'.
+			handle = []byte{'!'}
+
+			// A special case: the '!' tag. Set the handle to '' and the
+			// suffix to '!'.
+			if len(suffix) == 0 {
+				handle, suffix = suffix, handle
+			}
+		}
+	}
+
+	// Check the character which ends the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a tag",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_TAG_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      handle,
+		suffix:     suffix,
+	}
+	return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+	// Check the initial '!' character.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if parser.buffer[parser.buffer_pos] != '!' {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected '!'")
+		return false
+	}
+
+	var s []byte
+
+	// Copy the '!' character.
+	s = read(parser, s)
+
+	// Copy all subsequent alphabetical and numerical characters.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the trailing character is '!' and copy it.
+	if parser.buffer[parser.buffer_pos] == '!' {
+		s = read(parser, s)
+	} else {
+		// It's either the '!' tag or not really a tag handle. If it's a %TAG
+		// directive, it's an error. If it's a tag token, it must be a part of a URI.
+		if directive && string(s) != "!" {
+			yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find expected '!'")
+			return false
+		}
+	}
+
+	*handle = s
+	return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+	//size_t length = head ? strlen((char *)head) : 0
+	var s []byte
+
+	// Copy the head if needed.
+	//
+	// Note that we don't copy the leading '!' character.
+	if len(head) > 1 {
+		s = append(s, head[1:]...)
+	}
+
+	// Scan the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// The set of characters that may appear in URI is as follows:
+	//
+	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+	//      '%'.
+	// [Go] Convert this into more reasonable logic.
+	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+		parser.buffer[parser.buffer_pos] == '%' {
+		// Check if it is a URI-escape sequence.
+		if parser.buffer[parser.buffer_pos] == '%' {
+			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+				return false
+			}
+		} else {
+			s = read(parser, s)
+		}
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the tag is non-empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected tag URI")
+		return false
+	}
+	*uri = s
+	return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+	// Decode the required number of characters.
+	w := 1024
+	for w > 0 {
+		// Check for a URI-escaped octet.
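+		// Worked example (an assumed reading, not vendored commentary):
+		// "%C3%A9" is the two escaped octets 0xC3 0xA9; the leading octet
+		// sets w = 2, the trailing octet passes the 10xxxxxx check, and
+		// together they are the UTF-8 encoding of U+00E9 ('é').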
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. 
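+	// (By now the whole header has been consumed: as a sketch, "|+2" means
+	// literal style, chomping +1 and indent increment 2, while ">-" means
+	// folded style, chomping -1 and auto-detected indent. Only a comment or
+	// a line break may legitimately follow.)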
+ if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. 
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+		if parser.mark.column > max_indent {
+			max_indent = parser.mark.column
+		}
+
+		// Check for a tab character messing the indentation.
+		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found a tab character where an indentation space is expected")
+		}
+
+		// Have we found a non-empty line?
+		if !is_break(parser.buffer, parser.buffer_pos) {
+			break
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		// [Go] Should really be returning breaks instead.
+		*breaks = read_line(parser, *breaks)
+		*end_mark = parser.mark
+	}
+
+	// Determine the indentation level if needed.
+	if *indent == 0 {
+		*indent = max_indent
+		if *indent < parser.indent+1 {
+			*indent = parser.indent + 1
+		}
+		if *indent < 1 {
+			*indent = 1
+		}
+	}
+	return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+	// Eat the left quote.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the content of the quoted scalar.
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	for {
+		// Check that there are no document indicators at the beginning of the line.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
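+				// Illustrative expectations (not captured output):
+				//	\x41   -> 'A'        (code_length 2)
+				//	\u00E9 -> U+00E9 'é' (code_length 4)
+				//	\t, \n -> single control bytes via the cases below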
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+				s = read(parser, s)
+			}
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Check if we are at the end of the scalar.
+		if single {
+			if parser.buffer[parser.buffer_pos] == '\'' {
+				break
+			}
+		} else {
+			if parser.buffer[parser.buffer_pos] == '"' {
+				break
+			}
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
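+				// The first break becomes leading_break and later ones
+				// accumulate in trailing_breaks; the fold applied below
+				// then turns "a\nb" into "a b" but keeps the extra break
+				// of "a\n\nb" as "a\nb" (a sketch of the YAML folding
+				// rule, not captured output).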
+ if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". + if parser.flow_level > 0 && + parser.buffer[parser.buffer_pos] == ':' && + !is_blankz(parser.buffer, parser.buffer_pos+1) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found unexpected ':'") + return false + } + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) 
+					whitespaces = whitespaces[:0]
+				}
+			}
+
+			// Copy the character.
+			s = read(parser, s)
+
+			end_mark = parser.mark
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Is it the end?
+		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+			break
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for a tab character that abuses indentation.
+				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+						start_mark, "found a tab character that violates indentation")
+					return false
+				}
+
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check indentation level.
+		if parser.flow_level == 0 && parser.mark.column < indent {
+			break
+		}
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_PLAIN_SCALAR_STYLE,
+	}
+
+	// Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 00000000..5958822f --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,104 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 00000000..190362f2 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,89 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + // If the output encoding is UTF-8, we don't need to recode the buffer. 
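+	// For the recoding branch below, a code point outside the BMP becomes a
+	// UTF-16 surrogate pair. Worked example (standard UTF-16 arithmetic, not
+	// vendored commentary): for U+1F600, value-0x10000 = 0xF600, so the high
+	// unit is 0xD800+(0xF600>>10) = 0xD83D and the low unit is
+	// 0xDC00+(0xF600&0x3FF) = 0xDE00.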
+	if emitter.encoding == yaml_UTF8_ENCODING {
+		if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+			return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+		}
+		emitter.buffer_pos = 0
+		return true
+	}
+
+	// Recode the buffer into the raw buffer.
+	var low, high int
+	if emitter.encoding == yaml_UTF16LE_ENCODING {
+		low, high = 0, 1
+	} else {
+		high, low = 1, 0
+	}
+
+	pos := 0
+	for pos < emitter.buffer_pos {
+		// See the "reader.c" code for more details on UTF-8 encoding. Note
+		// that we assume that the buffer contains a valid UTF-8 sequence.
+
+		// Read the next UTF-8 character.
+		octet := emitter.buffer[pos]
+
+		var w int
+		var value rune
+		switch {
+		case octet&0x80 == 0x00:
+			w, value = 1, rune(octet&0x7F)
+		case octet&0xE0 == 0xC0:
+			w, value = 2, rune(octet&0x1F)
+		case octet&0xF0 == 0xE0:
+			w, value = 3, rune(octet&0x0F)
+		case octet&0xF8 == 0xF0:
+			w, value = 4, rune(octet&0x07)
+		}
+		for k := 1; k < w; k++ {
+			octet = emitter.buffer[pos+k]
+			value = (value << 6) + (rune(octet) & 0x3F)
+		}
+		pos += w
+
+		// Write the character.
+		if value < 0x10000 {
+			var b [2]byte
+			b[high] = byte(value >> 8)
+			b[low] = byte(value & 0xFF)
+			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
+		} else {
+			// Write the character using a surrogate pair (check "reader.c").
+			// High surrogate: 0xD800 + (value >> 10); low: 0xDC00 + (value & 0x3FF).
+			var b [4]byte
+			value -= 0x10000
+			b[high] = byte(0xD8 + (value >> 18))
+			b[low] = byte((value >> 10) & 0xFF)
+			b[high+2] = byte(0xDC + ((value >> 8) & 0x03))
+			b[low+2] = byte(value & 0xFF)
+			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
+		}
+	}
+
+	// Write the raw buffer.
+	if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
+		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+	}
+	emitter.buffer_pos = 0
+	emitter.raw_buffer = emitter.raw_buffer[:0]
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 00000000..36d6b883
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,346 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//   https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+	Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
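+//
+// For example, a type might marshal itself as a plain string. This is an
+// illustrative sketch only; Color is not part of this package, and the
+// example assumes "fmt" is imported:
+//
+//     type Color struct{ R, G, B uint8 }
+//
+//     func (c Color) MarshalYAML() (interface{}, error) {
+//         return fmt.Sprintf("#%02x%02x%02x", c.R, c.G, c.B), nil
+//     }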
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Does not apply to zero valued structs.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
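+//
+// As an illustrative sketch (these types are not part of this package),
+// the flow and inline flags might be combined like this:
+//
+//     type Point struct{ X, Y int }
+//
+//     type T struct {
+//         P     Point             `yaml:"p,flow"`
+//         Extra map[string]string `yaml:",inline"`
+//     }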
+//
+// For example:
+//
+//     type T struct {
+//         F int "a,omitempty"
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshal("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+
+	// Inline holds the field index if the field is part of an inlined struct.
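+	// It is a path of field numbers, outermost struct first (see the
+	// inline handling in getStructInfo below).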
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
+ } + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go new file mode 100644 index 00000000..d60a6b6b --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -0,0 +1,716 @@ +package yaml + +import ( + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. 
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT   // A STREAM-START event.
+	yaml_STREAM_END_EVENT     // A STREAM-END event.
+	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
+	yaml_ALIAS_EVENT          // An ALIAS event.
+	yaml_SCALAR_EVENT         // A SCALAR event.
+	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
+	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
+	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
+)
+
+// The event structure.
+type yaml_event_t struct {
+
+	// The event type.
+	typ yaml_event_type_t
+
+	// The start and end of the event.
+	start_mark, end_mark yaml_mark_t
+
+	// The document encoding (for yaml_STREAM_START_EVENT).
+	encoding yaml_encoding_t
+
+	// The version directive (for yaml_DOCUMENT_START_EVENT).
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+	tag_directives []yaml_tag_directive_t
+
+	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. 
+type yaml_document_t struct {
+
+	// The document nodes.
+	nodes []yaml_node_t
+
+	// The version directive.
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives.
+	tag_directives_data  []yaml_tag_directive_t
+	tag_directives_start int // The beginning of the tag directives list.
+	tag_directives_end   int // The end of the tag directives list.
+
+	start_implicit int // Is the document start indicator implicit?
+	end_implicit   int // Is the document end indicator implicit?
+
+	// The start/end of the document.
+	start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write not more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out]   data        A pointer to an application data specified by
+//                        yaml_parser_set_input().
+// [out]      buffer      The buffer to write the data from the source.
+// [in]       size        The size of the buffer.
+// [out]      size_read   The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+	possible     bool        // Is a simple key possible?
+	required     bool        // Is a simple key required?
+	token_number int         // The number of the token.
+	mark         yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+	case yaml_PARSE_END_STATE:
+		return "yaml_PARSE_END_STATE"
+	}
+	return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+	anchor []byte      // The anchor.
+	index  int         // The node id.
+	mark   yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+
+	problem string // Error description.
+
+	// The byte about which the problem occurred.
+	problem_offset int
+	problem_value  int
+	problem_mark   yaml_mark_t
+
+	// The error context.
+	context      string
+	context_mark yaml_mark_t
+
+	// Reader stuff
+
+	read_handler yaml_read_handler_t // Read handler.
+
+	input_file io.Reader // File input data.
+	input      []byte    // String input data.
+	input_pos  int
+
+	eof bool // EOF flag.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	unread int // The number of unread characters in the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The input encoding.
+ + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. 
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error   yaml_error_type_t // Error type.
+	problem string            // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte   // String output data.
+	output_file   io.Writer // File output data.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical   bool         // If the output is in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+	state  yaml_emitter_state_t   // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events      []yaml_event_t // The event queue.
+	events_head int            // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	flow_level int // The current flow level.
+
+	root_context       bool // Is it the document root context?
+	sequence_context   bool // Is it a sequence context?
+	mapping_context    bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line       int  // The current line.
+	column     int  // The current column.
+	whitespace bool // If the last character was a whitespace?
+	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
+	open_ended bool // If an explicit document end is required?
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias  bool   // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value                 []byte              // The scalar value.
+		multiline             bool                // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 00000000..8110ce3c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+ output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. 
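+// The disjunction is written out (rather than composed from is_break and
+// is_z) presumably so the function stays cheap enough to be inlined; compare
+// the note on width below.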
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/manifest b/vendor/manifest new file mode 100644 index 00000000..963823b9 --- /dev/null +++ b/vendor/manifest @@ -0,0 +1,146 @@ +{ + "version": 0, + "dependencies": [ + { + "importpath": "github.com/42wim/matterbridge-plus/bridge", + "repository": "https://github.com/42wim/matterbridge-plus", + "revision": "3c673e1d48339ab82788ade57394fd1b9bd4c990", + "branch": "master", + "path": "/bridge", + "notests": true + }, + { + "importpath": "github.com/42wim/matterbridge-plus/matterclient", + "repository": "https://github.com/42wim/matterbridge-plus", + "revision": "3c673e1d48339ab82788ade57394fd1b9bd4c990", + "branch": "master", + "path": "/matterclient", + "notests": true + }, + { + "importpath": "github.com/42wim/matterbridge/matterhook", + "repository": "https://github.com/42wim/matterbridge", + "revision": "6b18257185b1830bd2eff83fae30bdd2055f78b0", + "branch": "master", + "path": "/matterhook", + "notests": true + }, + { + "importpath": "github.com/Sirupsen/logrus", + "repository": "https://github.com/Sirupsen/logrus", + "revision": "4b6ea7319e214d98c938f12692336f7ca9348d6b", + "branch": "master", + "notests": true + }, + { + "importpath": "github.com/alecthomas/log4go", + "repository": "https://github.com/alecthomas/log4go", + "revision": "e5dc62318d9bd58682f1dceb53a4b24e8253682f", + "branch": "master", + "notests": true + }, + { + "importpath": "github.com/gorilla/schema", + "repository": "https://github.com/gorilla/schema", + "revision": "ddf016c1034e9cfd3eb5b276f626c8f04d765f6f", + "branch": "master", + "notests": true + }, + { + "importpath": "github.com/gorilla/websocket", + "repository": "https://github.com/gorilla/websocket", + "revision": "e2e3d8414d0fbae04004f151979f4e27c6747fe7", + "branch": "master", + "notests": true + }, + { + "importpath": "github.com/jpillora/backoff", + "repository": "https://github.com/jpillora/backoff", + 
"revision": "8dc7c274049d8fa405b51f6fbec11db195a977cd", + "branch": "master", + "notests": true + }, + { + "importpath": "github.com/mattermost/platform/model", + "repository": "https://github.com/mattermost/platform", + "revision": "3803750fb189880eb4c4b6d41fdca1e6f162b116", + "branch": "master", + "path": "/model", + "notests": true + }, + { + "importpath": "github.com/nicksnyder/go-i18n/i18n", + "repository": "https://github.com/nicksnyder/go-i18n", + "revision": "37e5c2de3e03e4b82693e3fcb4a6aa2cc4eb07e3", + "branch": "master", + "path": "/i18n", + "notests": true + }, + { + "importpath": "github.com/pborman/uuid", + "repository": "https://github.com/pborman/uuid", + "revision": "c55201b036063326c5b1b89ccfe45a184973d073", + "branch": "master", + "notests": true + }, + { + "importpath": "github.com/peterhellberg/giphy", + "repository": "https://github.com/peterhellberg/giphy", + "revision": "f9e0363118602c138e4d9dad66c261b15f26d31a", + "branch": "master", + "notests": true + }, + { + "importpath": "github.com/sorcix/irc", + "repository": "https://github.com/sorcix/irc", + "revision": "2f0c85e4ec6590c2c6f6ef93b0a86021281ea193", + "branch": "master", + "notests": true + }, + { + "importpath": "github.com/thoj/go-ircevent", + "repository": "https://github.com/thoj/go-ircevent", + "revision": "da78ed515c0f0833e7a92c7cc52898176198e2c1", + "branch": "master", + "notests": true + }, + { + "importpath": "golang.org/x/crypto/bcrypt", + "repository": "https://go.googlesource.com/crypto", + "revision": "3fbbcd23f1cb824e69491a5930cfeff09b12f4d2", + "branch": "master", + "path": "/bcrypt", + "notests": true + }, + { + "importpath": "golang.org/x/crypto/blowfish", + "repository": "https://go.googlesource.com/crypto", + "revision": "3fbbcd23f1cb824e69491a5930cfeff09b12f4d2", + "branch": "master", + "path": "/blowfish", + "notests": true + }, + { + "importpath": "golang.org/x/net/http2/hpack", + "repository": "https://go.googlesource.com/net", + "revision": "e45385e9b226f570b1f086bf287b25d3d4117776", + "branch": "master", + "path": "/http2/hpack", + "notests": true + }, + { + "importpath": "gopkg.in/gcfg.v1", + "repository": "https://gopkg.in/gcfg.v1", + "revision": "083575c3955c85df16fe9590cceab64d03f5eb6e", + "branch": "master", + "notests": true + }, + { + "importpath": "gopkg.in/yaml.v2", + "repository": "https://gopkg.in/yaml.v2", + "revision": "a83829b6f1293c91addabc89d0571c246397bbf4", + "branch": "master", + "notests": true + } + ] +} \ No newline at end of file -- cgit v1.2.3